| Column | Type | Range / values |
|---|---|---|
| max_stars_repo_path | string | 4-286 chars |
| max_stars_repo_name | string | 5-119 chars |
| max_stars_count | int64 | 0-191k |
| id | string | 1-7 chars |
| content | string | 6-1.03M chars |
| content_cleaned | string | 6-1.03M chars |
| language | string | 111 classes |
| language_score | float64 | 0.03-1 |
| comments | string | 0-556k chars |
| edu_score | float64 | 0.32-5.03 |
| edu_int_score | int64 | 0-5 |
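Each row pairs one source file (content, plus a near-identical content_cleaned variant) with its extracted comments, a detected natural language and confidence score, and educational-value scores (edu_score, edu_int_score). Below is a minimal sketch of how such rows might be filtered, assuming this split has been exported to a local Parquet file; the filename is a placeholder, not part of the dataset:

import pandas as pd

# Placeholder path; substitute the actual export of this split.
df = pd.read_parquet("rows.parquet")

# Keep confidently-English samples with a high integer educational score.
subset = df[(df["language_score"] >= 0.8) & (df["edu_int_score"] >= 3)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]].head())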
beansdbadmin/core/client.py | ariesdevil/beansdbadmin | 11 | 400 | <filename>beansdbadmin/core/client.py
#!/usr/bin/python
# encoding: utf-8
'''a rich client for a single beansdb server
1. targets one server (instead of several, as libmc.Client does)
2. encapsulates the @, ?, and gc commands
Use it instead of libmc.Client.
'''
import telnetlib
import logging
import libmc
import string
import urllib
import itertools
import warnings
from collections import defaultdict
from beansdbadmin.core.hint import parse_new_hint_body
from beansdbadmin.core.data import parse_records
from beansdbadmin.core.hash import get_khash64
def get_url_content(url):
return urllib.urlopen(url).read()
def check_bucket(bucket):
assert 0 <= bucket < 16
def dir_to_dict(dir_str):
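    # Each non-empty line of a dir listing is "<key-or-bucket> <hash> <version-or-count>";
    # the hash is reduced to its low 16 bits and both numeric fields are parsed as integers.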
d = dict()
if dir_str:
for line in [x for x in dir_str.split('\n') if x]:
key_or_bucket, _hash, ver_or_count = line.split(' ')
d[key_or_bucket] = int(_hash) & 0xffff, int(ver_or_count)
return d
def get_bucket_keys_count(store, bucket, depth=1):
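    # With depth=1 the root listing "@" is queried and the bucket itself is matched;
    # with depth=2 the parent listing "@<parent-hex>" is queried and the bucket's low
    # nibble is matched against its sub-directories.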
cmd = "@"
sub = bucket
if depth == 2:
cmd = "@%x" % (bucket/16)
sub = bucket % 16
result = store.get(cmd)
if result:
lines = result.split('\n')
for line in lines:
if len(line) == 0:
continue
d, _, c = line.split()
if d.endswith('/'):
bucket_ = int(d[0], 16)
if bucket_ == sub:
return int(c)
raise Exception('get %s from %s, reply = [%s], bucket %x not found' % (cmd, store, result, bucket))
def get_buckets_keys_count(store):
""" return dict: buckets -> count """
st = {}
try:
for line in (store.get('@') or '').split('\n'):
if line:
d, _, c = line.split(' ')
if not d.endswith('/'):
continue
st[int(d[0], 16)] = int(c)
return st
except IOError:
raise Exception("cannot get @ from %s" % (store))
def get_primary_buckets(store):
""" return possible primary buckets, might be wrong on temporary nodes,
result is list of buckets in integer
"""
ss = get_buckets_keys_count(store)
bucket_list = ss.items()
bucket_list = [x for x in bucket_list if x[1] > 0]
if not bucket_list:
return None
bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True)
result = [bucket_list[0]]
for i in bucket_list[1:]:
if result[-1][1] / i[1] >= 2:
break
result.append(i)
return [x[0] for x in result]
def get_key_info_disk(store, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
info = store.get('??' + key)
if info:
return [int(x) for x in info.split()]
def is_gc_running(ip, port):
s = get_gc_status(ip, port)
if s and s.find('running') >= 0:
return True
return False
def get_gc_status(ip, port):
t = telnetlib.Telnet(ip, port)
t.write('optimize_stat\r\n')
out = t.read_until('\n')
t.write('quit\r\n')
t.close()
return out.strip("\r\n")
def connect(server, **kwargs):
comp_threshold = kwargs.pop('comp_threshold', 0)
prefix = kwargs.pop('prefix', None)
if prefix is not None:
warnings.warn('"prefix" is deprecated. '
'use douban.wrapper.Prefix instead.')
c = libmc.Client([server],
do_split=0,
comp_threshold=comp_threshold,
prefix=prefix)
c.config(libmc.MC_CONNECT_TIMEOUT, 300) # 0.3s
c.config(libmc.MC_POLL_TIMEOUT, 3000) # 3s
c.config(libmc.MC_RETRY_TIMEOUT, 5) # 5s
return c
class MCStore(object):
IGNORED_LIBMC_RET = frozenset([
libmc.MC_RETURN_OK,
libmc.MC_RETURN_INVALID_KEY_ERR
])
def __init__(self, addr):
self.addr = addr
self.host, port = addr.split(":")
self.port = int(port)
self.mc = connect(addr)
def __repr__(self):
return '<MCStore(addr=%s)>' % repr(self.addr)
def __str__(self):
return self.addr
def set(self, key, data, rev=0):
return bool(self.mc.set(key, data, rev))
def set_raw(self, key, data, rev=0, flag=0):
if rev < 0:
raise Exception(str(rev))
return self.mc.set_raw(key, data, rev, flag)
def set_multi(self, values, return_failure=False):
return self.mc.set_multi(values, return_failure=return_failure)
def _check_last_error(self):
last_err = self.mc.get_last_error()
if last_err not in self.IGNORED_LIBMC_RET:
raise IOError(last_err, self.mc.get_last_strerror())
def get(self, key):
try:
r = self.mc.get(key)
if r is None:
self._check_last_error()
return r
except ValueError:
self.mc.delete(key)
def get_raw(self, key):
r, flag = self.mc.get_raw(key)
if r is None:
self._check_last_error()
return r, flag
def get_multi(self, keys):
r = self.mc.get_multi(keys)
self._check_last_error()
return r
def delete(self, key):
return bool(self.mc.delete(key))
def delete_multi(self, keys, return_failure=False):
return self.mc.delete_multi(keys, return_failure=return_failure)
def exists(self, key):
return bool(self.mc.get('?' + key))
def incr(self, key, value):
return self.mc.incr(key, int(value))
class DBClient(MCStore):
def __init__(self, addr):
MCStore.__init__(self, addr)
self._is_old = None
def stats(self):
stats = self.mc.stats()
return stats.values()[0] if stats else None
def is_old(self):
if self._is_old is None:
ver = self.get_server_version()
self._is_old = (ver.strip().split(".")[0] == "0")
return self._is_old
def get_collision_summary(self, bucket):
check_bucket(bucket)
raw = self.get("@collision_%x" % bucket)
if raw is None:
return None
count, hcount, khash, data_size = raw.split()
return (int(count), int(hcount), int(khash, 16), int(data_size))
def get_collision(self, bucket):
check_bucket(bucket)
collisions = defaultdict(dict)
hint_data = self.get("@collision_all_%x" % bucket)
if hint_data is None:
return dict()
for key, meta, _ in parse_new_hint_body(hint_data):
khash_str, _, ver, vhash = meta
collisions[khash_str][key] = (vhash, ver)
return dict(collisions)
def get_records_by_khash_raw(self, khash):
if self.is_old():
return []
if not isinstance(khash, str):
khash = "%016x" % khash
return self.get("@@" + khash)
def get_records_by_khash(self, khash_str):
raw = self.get_records_by_khash_raw(khash_str)
if raw:
return parse_records(raw, False)
else:
return []
def start_gc(self, bucket='', start_fid=0, end_fid=None):
""" bucket must be in 0 or 00 string """
if bucket:
assert isinstance(bucket, basestring) and len(bucket) <= 2
t = telnetlib.Telnet(self.host, self.port)
tree = '@%s' % bucket
if end_fid is None:
gc_cmd = 'gc {} {}\n'.format(tree, start_fid)
else:
gc_cmd = 'gc {} {} {}\n'.format(tree, start_fid, end_fid)
t.write(gc_cmd)
out = t.read_until('\n').strip('\r\n')
assert out == 'OK'
t.write('quit\n')
t.close()
def start_gc_all_buckets(self, db_depth):
hex_digits = string.digits + 'abcdef'
buckets_iter = itertools.product(*[hex_digits for _ in range(db_depth)])
buckets = [''.join(i) for i in buckets_iter]
self.start_gc_buckets(buckets)
def start_gc_buckets(self, buckets):
for b in buckets:
self.start_gc(bucket=b)
while True:
status = self.get_gc_status()
if status.find('running') >= 0:
continue
elif status == 'success':
print "bucket %s gc done" % b
break
elif status == 'fail':
return self.fail("optimize_stat = fail")
else:
self.fail(status)
def get_gc_status(self):
return get_gc_status(self.host, self.port)
def get_version(self, key):
meta = self.get("?" + key)
if meta:
return int(meta.split()[0])
def item_count(self):
s = self.stats()
if s is None:
return None
return int(s['total_items'])
def get_key_info_mem(self, key, khash64=None):
''' return (vhash, ver) or None'''
if khash64 is None:
khash64 = get_khash64(key)
khash32_str = "@%08x" % (khash64 >> 32)
_dir = self.get_dir(khash32_str)
if self.is_old():
return _dir.get(key, None)
else:
return _dir.get("%016x" % khash64, None)
def get_khash_info_mem(self, khash):
''' return [(key, (vhash, ver))], key is "" for v2.'''
khash32 = "@%08x" % (khash >> 32)
_dir = self.get_dir(khash32)
ret = []
if self.is_old():
for k, (vhash, ver) in _dir.iteritems():
if get_khash64(k) == khash:
ret.append((k, (vhash, ver)))
else:
for k, (vhash, ver) in _dir.iteritems():
if int(k, 16) == khash:
return [("", (int(vhash), ver))]
return ret
def get_server_version(self):
try:
st = self.stats()
if st:
return st["version"]
except IOError:
logging.error("fail to get version %s", self)
except KeyError:
logging.error("fail to get version %s %s", self, st)
def get_dir(self, path):
''' return dict
case1: map dir(0-f) to (hash, count),
like {'0/': (1471, 27784005), ... },
case2: map key(or khash) to (vhash, version),
like {'3000000377e9c2ad': (22212, 1), ... }'''
try:
content = self.get(path)
except IOError:
content = ''
return dir_to_dict(content)
def list_dir(self, d): # FIXME: d should not need prefix @?
'''list all KEY in the dir!
not use it if dir is large!'''
for path, (vhash, ver) in sorted(self.get_dir(d).items()):
if path.endswith('/') and len(path) == 2:
for v in self.list_dir(d + path[:-1]):
yield v
else:
yield path, int(vhash), int(ver)
def get_bucket_keys_count(self, bucket, depth=1):
return get_bucket_keys_count(self, bucket, depth)
def get_key_info_disk(self, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
return get_key_info_disk(self, key)
def prepare(self, data):
return libmc.encode_value(data, self.mc.comp_threshold)
def close(self):
pass
def test_new(addr, bucket):
b = bucket
c = DBClient(addr)
print "stats:", c.stats()
print 'version:', c.get_server_version()
print "isold:", c.is_old()
print "dir root:", c.get_dir("@")
print "bucket key count:", c.get_bucket_keys_count(int(b))
print "item_count:", c.item_count()
print "primary_buckets", get_primary_buckets(c)
leaf = c.get_dir("@" + b + "000000")
print "a dir leaf:", leaf
khash_str = list(leaf)[0]
print "a khash_str", khash_str
r = c.get_records_by_khash(khash_str)[0]
k = r[0]
print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:]
print "key info mem:", c.get_key_info_mem(k)
print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \
c.get_key_info_disk(k)
print "key version:", c.get_version(k)
print "collision_summary", c.get_collision_summary(int(b))
print "gc status:", c.get_gc_status()
if __name__ == '__main__':
test_new("rosa3a:7900", '3')
Simulator/Geometry/RectOverlap.py | cuixiongyi/RBE595 | 0 | 401 | import matplotlib.pyplot
__author__ = 'xiongyi'
line1 = [(200, 100), (200, 400)]
line2 = [(190, 190), (210, 210)]
def overlap():
l1p1x = line1[0][0]
l1p1y = line1[0][1]
l1p2x = line1[1][0]
l1p2y = line1[1][1]
# make sure p1x < p2x
if l1p1x > l1p2x:
tmp = l1p1x
l1p1x = l1p2x
l1p2x = tmp
# make sure p1y < p2y
if l1p1y > l1p2y:
tmp = l1p1y
l1p1y = l1p2y
l1p2y = tmp
l2p1x = line2[0][0]
l2p1y = line2[0][1]
l2p2x = line2[1][0]
l2p2y = line2[1][1]
# make sure p1x < p2x
if l2p1x > l2p2x:
tmp = l2p1x
l2p1x = l2p2x
l2p2x = tmp
# make sure p1y < p2y
if l2p1y > l2p2y:
tmp = l2p1y
l2p1y = l2p2y
l2p2y = tmp
# line2 rectangle is inside line1 rect
if l1p1x < l2p2x and l1p2x > l2p1x and l1p1y < l2p2y and l1p2y > l2p1y:
return True
    # line1 rectangle is inside line2 rect
if l1p1x > l2p2x and l1p2x < l2p1x and l1p1y > l2p2y and l1p2y < l2p1y:
return True
if l1p1x > l2p2x or l1p2x < l2p1x:
return False
if l1p1y > l2p2y or l1p2y < l2p1y:
return False
return True
if __name__ == '__main__':
matplotlib.pyplot.plot((line1[0][0],line1[1][0]),(line1[0][1],line1[1][1]))
matplotlib.pyplot.hold(True)
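    # Note: pyplot.hold() was deprecated in Matplotlib 2.0 and removed in 3.0; recent
    # Matplotlib draws successive plot() calls on the same axes by default.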
matplotlib.pyplot.plot((line2[0][0],line2[1][0]),(line2[0][1],line2[1][1]))
print(overlap())
matplotlib.pyplot.show()
gino/loader.py | p4l1ly/gino | 0 | 402 | <reponame>p4l1ly/gino<gh_stars>0
from sqlalchemy import select
from sqlalchemy.schema import Column
from .declarative import Model
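# These loaders describe how raw database rows are mapped back onto Python objects.
# Loader.get() picks an implementation from the type of the hint it is given: Model
# subclasses and aliases produce model instances, Column picks out a single column,
# tuples load each element recursively, callables are invoked per row, and any other
# value is returned unchanged as a constant.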
class Loader:
@classmethod
def get(cls, value):
from .crud import Alias
if isinstance(value, Loader):
rv = value
elif isinstance(value, type) and issubclass(value, Model):
rv = ModelLoader(value)
elif isinstance(value, Alias):
rv = AliasLoader(value)
elif isinstance(value, Column):
rv = ColumnLoader(value)
elif isinstance(value, tuple):
rv = TupleLoader(value)
elif callable(value):
rv = CallableLoader(value)
else:
rv = ValueLoader(value)
return rv
@property
def query(self):
rv = select(self.get_columns())
from_clause = self.get_from()
if from_clause is not None:
rv = rv.select_from(from_clause)
return rv.execution_options(loader=self)
def do_load(self, row, context):
raise NotImplementedError
def get_columns(self):
return []
def get_from(self):
return None
def __getattr__(self, item):
return getattr(self.query, item)
class ModelLoader(Loader):
def __init__(self, model, *column_names, **extras):
self.model = model
self._distinct = None
if column_names:
self.columns = [getattr(model, name) for name in column_names]
else:
self.columns = model
self.extras = dict((key, self.get(value))
for key, value in extras.items())
self.on_clause = None
def _do_load(self, row):
rv = self.model()
for c in self.columns:
if c in row:
rv.__values__[c.name] = row[c]
return rv
def do_load(self, row, context):
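        # When distinct() columns are set, rows are de-duplicated by those column values:
        # the first row for a key builds the instance and caches it in the query context,
        # while later rows only feed the nested loaders in self.extras.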
distinct = True
if self._distinct:
if context is None:
context = {}
ctx = context.setdefault(self._distinct, {})
key = tuple(row[col] for col in self._distinct)
if key == (None,) * len(key):
return None, None
rv = ctx.get(key)
if rv is None:
rv = self._do_load(row)
ctx[key] = rv
else:
distinct = False
else:
rv = self._do_load(row)
for key, value in self.extras.items():
value, distinct_ = value.do_load(row, context)
if distinct_ is not None:
setattr(rv, key, value)
return rv, distinct
def get_columns(self):
yield from self.columns
for subloader in self.extras.values():
yield from subloader.get_columns()
def get_from(self):
rv = self.model
for key, subloader in self.extras.items():
from_clause = subloader.get_from()
if from_clause is not None:
rv = rv.outerjoin(from_clause,
getattr(subloader, 'on_clause', None))
return rv
def load(self, *column_names, **extras):
if column_names:
self.columns = [getattr(self.model, name) for name in column_names]
self.extras.update((key, self.get(value))
for key, value in extras.items())
return self
def on(self, on_clause):
self.on_clause = on_clause
return self
def distinct(self, *columns):
self._distinct = columns
return self
class AliasLoader(ModelLoader):
def __init__(self, alias, *column_names, **extras):
super().__init__(alias, *column_names, **extras)
class ColumnLoader(Loader):
def __init__(self, column):
self.column = column
def do_load(self, row, context):
return row[self.column], True
class TupleLoader(Loader):
def __init__(self, values):
self.loaders = (self.get(value) for value in values)
def do_load(self, row, context):
return tuple(loader.do_load(row, context)[0]
for loader in self.loaders), True
class CallableLoader(Loader):
def __init__(self, func):
self.func = func
def do_load(self, row, context):
return self.func(row, context), True
class ValueLoader(Loader):
def __init__(self, value):
self.value = value
def do_load(self, row, context):
return self.value, True
emission/clients/choice/choice.py | Andrew-Tan/e-mission-server | 0 | 403 | <gh_stars>0
# Standard imports
import logging
import math
import json
from uuid import UUID
from datetime import datetime, timedelta
import time
# Our imports
from emission.core.get_database import get_trip_db, get_section_db
import emission.analysis.result.carbon as carbon
import emission.core.common as common
import emission.net.api.stats as stats
from emission.core.wrapper.user import User
from emission.clients.leaderboard import leaderboard
from emission.clients.gamified import gamified
from emission.clients.recommendation import recommendation
from emission.clients.commontrips import commontrips
from emission.clients.data import data
# TODO: Consider subclassing to provide client specific user functions
def setCurrView(uuid, newView):
user = User.fromUUID(uuid)
user.setClientSpecificProfileFields({'curr_view': newView})
stats.storeResultEntry(uuid, stats.STAT_VIEW_CHOICE, time.time(), newView)
def getCurrView(uuid):
user = User.fromUUID(uuid)
profile = user.getProfile()
if profile is None:
logging.debug("profile is None, returning data")
return "data"
logging.debug("profile.get('curr_view', 'dummy') is %s" % profile.get("curr_view", "data"))
return profile.get("curr_view", "data")
def switchResultDisplay(params):
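    # params comes from the client request and must carry the requesting user's 'uuid'
    # and the 'new_view' to switch the result display to.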
logging.debug("params = %s" % (params))
print "params = %s" % (params['uuid'])
try:
uuid = UUID(params['uuid'])
except:
uuid = "temp" ## For testing purposes
newView = params['new_view']
logging.debug("Changing choice for user %s to %s" % (uuid, newView))
setCurrView(uuid, newView)
# TODO: Add stats about the switch as part of the final stats-adding pass
return {'curr_view': newView }
def getResult(user_uuid):
# This is in here, as opposed to the top level as recommended by the PEP
# because then we don't have to worry about loading bottle in the unit tests
from bottle import template
import base64
from dao.user import User
from dao.client import Client
user = User.fromUUID(user_uuid)
renderedTemplate = template("clients/choice/result_template.html",
variables = json.dumps({'curr_view': getCurrView(user_uuid),
'uuid': str(user_uuid),
'client_key': Client("choice").getClientKey()}),
gameResult = base64.b64encode(gamified.getResult(user_uuid)),
leaderboardResult = base64.b64encode(leaderboard.getResult(user_uuid)),
dataResult = base64.b64encode(data.getResult(user_uuid)),
commonTripsResult = base64.b64encode(commontrips.getResult(user_uuid)),
recommendationResult = base64.b64encode(recommendation.getResult(user_uuid)))
return renderedTemplate
# These are copy/pasted from our first client, the carshare study
def getSectionFilter(uuid):
# We are not planning to do any filtering for this study. Bring on the worst!
return []
def clientSpecificSetters(uuid, sectionId, predictedModeMap):
return None
def getClientConfirmedModeField():
return None
# TODO: Simplify this. runBackgroundTasks are currently only invoked from the
# result precomputation code. We could change that code to pass in the day, and
# remove this interface. Extra credit: should we pass in the day, or a date
# range? Passing in the date range could make it possible for us to run the
# scripts more than once a day...
def runBackgroundTasks(uuid):
today = datetime.now().date()
runBackgroundTasksForDay(uuid, today)
def runBackgroundTasksForDay(uuid, today):
leaderboard.runBackgroundTasksForDay(uuid, today)
gamified.runBackgroundTasksForDay(uuid, today)
data.runBackgroundTasksForDay(uuid, today)
lib/formatter/text.py | ylafon/redbot | 0 | 404 | <gh_stars>0
#!/usr/bin/env python
"""
HAR Formatter for REDbot.
"""
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = """\
Copyright (c) 2008-2010 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import operator
import nbhttp.error as nberr
import redbot.speak as rs
from redbot.formatter import Formatter
nl = u"\n"
# TODO: errors and status on stderr with CLI?
class BaseTextFormatter(Formatter):
"""
Base class for text formatters."""
media_type = "text/plain"
msg_categories = [
rs.c.GENERAL, rs.c.CONNECTION, rs.c.CONNEG,
rs.c.CACHING, rs.c.VALIDATION, rs.c.RANGE
]
link_order = [
('link', 'Head Links'),
('script', 'Script Links'),
('frame', 'Frame Links'),
('iframe', 'IFrame Links'),
('img', 'Image Links'),
]
error_template = "Error: %s\n"
def __init__(self, *args, **kw):
Formatter.__init__(self, *args, **kw)
def start_output(self):
pass
def feed(self, red, chunk):
pass
def status(self, msg):
pass
def finish_output(self):
"Fill in the template with RED's results."
if self.red.res_complete:
self.output(self.format_headers(self.red) + nl + nl)
self.output(self.format_recommendations(self.red) + nl)
else:
if self.red.res_error == None:
pass
elif self.red.res_error['desc'] == nberr.ERR_CONNECT['desc']:
self.output(self.error_template % "Could not connect to the server (%s)" % \
self.red.res_error.get('detail', "unknown"))
elif self.red.res_error['desc'] == nberr.ERR_URL['desc']:
self.output(self.error_template % self.red.res_error.get(
'detail', "RED can't fetch that URL."))
elif self.red.res_error['desc'] == nberr.ERR_READ_TIMEOUT['desc']:
self.output(self.error_template % self.red.res_error['desc'])
elif self.red.res_error['desc'] == nberr.ERR_HTTP_VERSION['desc']:
self.output(self.error_template % "<code>%s</code> isn't HTTP." % \
self.red.res_error.get('detail', '')[:20])
else:
raise AssertionError, "Unidentified incomplete response error."
def format_headers(self, red):
out = [u"HTTP/%s %s %s" % (
red.res_version, red.res_status, red.res_phrase)]
return nl.join(out + [u"%s:%s" % h for h in red.res_hdrs])
def format_recommendations(self, red):
return "".join([self.format_recommendation(red, category) \
for category in self.msg_categories])
def format_recommendation(self, red, category):
messages = [msg for msg in red.messages if msg.category == category]
if not messages:
return ""
out = []
if [msg for msg in messages]:
out.append(u"* %s:" % category)
for m in messages:
out.append(
u" * %s" % (self.colorize(m.level, m.summary["en"] % m.vars))
)
smsgs = [msg for msg in getattr(m.subrequest, "messages", []) if msg.level in [rs.l.BAD]]
if smsgs:
out.append("")
for sm in smsgs:
out.append(
u" * %s" %
(self.colorize(sm.level, sm.summary["en"] % sm.vars))
)
out.append(nl)
out.append(nl)
return nl.join(out)
@staticmethod
def colorize(level, string):
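        # Wrap the string in ANSI colour escape codes chosen by message level
        # (green for info/good, red for bad, yellow for warning, blue for URIs).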
# info
color_start = u"\033[0;32m"
color_end = u"\033[0;39m"
if level == "good":
color_start = u"\033[1;32m"
color_end = u"\033[0;39m"
if level == "bad":
color_start = u"\033[1;31m"
color_end = u"\033[0;39m"
if level == "warning":
color_start = u"\033[1;33m"
color_end = u"\033[0;39m"
if level == "uri":
color_start = u"\033[1;34m"
color_end = u"\033[0;39m"
return color_start + string + color_end
class TextFormatter(BaseTextFormatter):
"""
Format a RED object as text.
"""
name = "txt"
media_type = "text/plain"
def __init__(self, *args, **kw):
BaseTextFormatter.__init__(self, *args, **kw)
def finish_output(self):
BaseTextFormatter.finish_output(self)
self.done()
class TextListFormatter(BaseTextFormatter):
"""
Format multiple RED responses as a textual list.
"""
name = "txt"
media_type = "text/plain"
can_multiple = True
def __init__(self, *args, **kw):
BaseTextFormatter.__init__(self, *args, **kw)
def finish_output(self):
"Fill in the template with RED's results."
BaseTextFormatter.finish_output(self)
sep = "=" * 78
for hdr_tag, heading in self.link_order:
droids = [d[0] for d in self.red.link_droids if d[1] == hdr_tag]
self.output("%s\n%s (%d)\n%s\n" % (
sep, heading, len(droids), sep
))
if droids:
droids.sort(key=operator.attrgetter('uri'))
for droid in droids:
self.output(self.format_uri(droid) + nl + nl)
self.output(self.format_headers(droid) + nl + nl)
self.output(self.format_recommendations(droid) + nl + nl)
self.done()
def format_uri(self, red):
return self.colorize("uri", red.uri)
lib/ioe_pot.py | ifurusato/ros | 9 | 405 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-09-19
# modified: 2020-09-19
#
import sys, colorsys
import ioexpander as io
from colorama import init, Fore, Style
init()
from lib.logger import Logger
# ..............................................................................
class Potentiometer(object):
'''
Configures an IO Expander Potentiometer breakout, returning an analog
value scaled to a specified range. For a center-zero pot simply
specify the minimum value as (-1.0 * out_max).
'''
def __init__(self, config, level):
super().__init__()
self._log = Logger('ioe', level)
if config is None:
raise ValueError('no configuration provided.')
_config = config['ros'].get('ioe_potentiometer')
# 0x18 for IO Expander, 0x0E for the potentiometer breakout
# self._i2c_addr = 0x0E
self._i2c_addr = _config.get('i2c_address')
self._pin_red = _config.get('pin_red')
self._pin_green = _config.get('pin_green')
self._pin_blue = _config.get('pin_blue')
self._log.info("pins: red: {}; green: {}; blue: {}".format(self._pin_red, self._pin_green, self._pin_blue))
self._pot_enc_a = 12
self._pot_enc_b = 3
self._pot_enc_c = 11
self._max_value = 3.3 # maximum voltage (3.3v supply)
self._brightness = _config.get('brightness') # effectively max fraction of period LED will be on
self._period = int(255 / self._brightness) # add a period large enough to get 0-255 steps at the desired brightness
_in_min = _config.get('in_min') # minimum analog value from IO Expander
_in_max = _config.get('in_max') # maximum analog value from IO Expander
self.set_input_limits(_in_min, _in_max)
_out_min = _config.get('out_min') # minimum scaled output value
_out_max = _config.get('out_max') # maximum scaled output value
self.set_output_limits(_out_min, _out_max)
# now configure IO Expander
self._ioe = io.IOE(i2c_addr=self._i2c_addr)
self._ioe.set_mode(self._pot_enc_a, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_b, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_c, io.ADC)
self._ioe.output(self._pot_enc_a, 1)
self._ioe.output(self._pot_enc_b, 0)
self._ioe.set_pwm_period(self._period)
self._ioe.set_pwm_control(divider=2) # PWM as fast as we can to avoid LED flicker
self._ioe.set_mode(self._pin_red, io.PWM, invert=True)
self._ioe.set_mode(self._pin_green, io.PWM, invert=True)
self._ioe.set_mode(self._pin_blue, io.PWM, invert=True)
self._log.info("running LED with {} brightness steps.".format(int(self._period * self._brightness)))
self._log.info("ready.")
# ..........................................................................
def set_input_limits(self, in_min, in_max):
self._in_min = in_min
self._in_max = in_max
self._log.info('input range:\t{:>5.2f}-{:<5.2f}'.format(self._in_min, self._in_max))
# ..........................................................................
def set_output_limits(self, out_min, out_max):
self._out_min = out_min
self._out_max = out_max
self._log.info('output range:\t{:>5.2f}-{:<5.2f}'.format(self._out_min, self._out_max))
# ..........................................................................
def get_value(self):
value = self._max_value - self._ioe.input(self._pot_enc_c)
self._log.debug(Fore.BLACK + 'value: {:<5.2f}'.format(value))
return value
# ..........................................................................
def set_rgb(self, value):
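        # Map the pot reading onto the colour wheel: the value becomes an HSV hue in
        # 0.0-1.0, which is converted to RGB and scaled to the PWM duty-cycle range.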
h = value / self._max_value # time.time() / 10.0
r, g, b = [int(c * self._period * self._brightness) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
self._ioe.output(self._pin_red, r)
self._ioe.output(self._pin_green, g)
self._ioe.output(self._pin_blue, b)
self._log.debug('value: {:<5.2f}; rgb: {},{},{}'.format(value, r, g, b))
# ..........................................................................
def get_scaled_value(self, update_led=True):
'''
Return a scaled value while also updating the RGB LED if the
argument is True (the default).
'''
_value = self.get_value()
if update_led:
self.set_rgb(_value)
return self.scale_value(_value) # as float
# # ..........................................................................
# def x_get_scaled_value(self):
# '''
# (out_max - out_min)(value - in_min)
# f(x) = ----------------------------------- + out_min
# in_max - in_min
# where: a = 0.0, b = 1.0, min = 0, max = 330.
# '''
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# ..........................................................................
def scale_value(self, value):
'''
(out_max - out_min)(value - in_min)
f(x) = ----------------------------------- + out_min
in_max - in_min
where e.g.: a = 0.0, b = 1.0, min = 0, max = 330.
'''
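        # Worked example: with out_min=0.0, out_max=1.0, in_min=0, in_max=330, an input
        # of 165 maps to (1.0 - 0.0) * (165 - 0) / (330 - 0) + 0.0 = 0.5.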
return (( self._out_max - self._out_min ) * ( value - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
#EOF
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by <NAME>. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: <NAME>
# created: 2020-09-19
# modified: 2020-09-19
#
import sys, colorsys
import ioexpander as io
from colorama import init, Fore, Style
init()
from lib.logger import Logger
# ..............................................................................
class Potentiometer(object):
'''
Configures an IO Expander Potentiometer breakout, returning an analog
value scaled to a specified range. For a center-zero pot simply
specify the minimum value as (-1.0 * out_max).
'''
def __init__(self, config, level):
super().__init__()
self._log = Logger('ioe', level)
if config is None:
raise ValueError('no configuration provided.')
_config = config['ros'].get('ioe_potentiometer')
# 0x18 for IO Expander, 0x0E for the potentiometer breakout
# self._i2c_addr = 0x0E
self._i2c_addr = _config.get('i2c_address')
self._pin_red = _config.get('pin_red')
self._pin_green = _config.get('pin_green')
self._pin_blue = _config.get('pin_blue')
self._log.info("pins: red: {}; green: {}; blue: {}".format(self._pin_red, self._pin_green, self._pin_blue))
self._pot_enc_a = 12
self._pot_enc_b = 3
self._pot_enc_c = 11
self._max_value = 3.3 # maximum voltage (3.3v supply)
self._brightness = _config.get('brightness') # effectively max fraction of period LED will be on
self._period = int(255 / self._brightness) # add a period large enough to get 0-255 steps at the desired brightness
_in_min = _config.get('in_min') # minimum analog value from IO Expander
_in_max = _config.get('in_max') # maximum analog value from IO Expander
self.set_input_limits(_in_min, _in_max)
_out_min = _config.get('out_min') # minimum scaled output value
_out_max = _config.get('out_max') # maximum scaled output value
self.set_output_limits(_out_min, _out_max)
# now configure IO Expander
self._ioe = io.IOE(i2c_addr=self._i2c_addr)
self._ioe.set_mode(self._pot_enc_a, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_b, io.PIN_MODE_PP)
self._ioe.set_mode(self._pot_enc_c, io.ADC)
self._ioe.output(self._pot_enc_a, 1)
self._ioe.output(self._pot_enc_b, 0)
self._ioe.set_pwm_period(self._period)
self._ioe.set_pwm_control(divider=2) # PWM as fast as we can to avoid LED flicker
self._ioe.set_mode(self._pin_red, io.PWM, invert=True)
self._ioe.set_mode(self._pin_green, io.PWM, invert=True)
self._ioe.set_mode(self._pin_blue, io.PWM, invert=True)
self._log.info("running LED with {} brightness steps.".format(int(self._period * self._brightness)))
self._log.info("ready.")
# ..........................................................................
def set_input_limits(self, in_min, in_max):
self._in_min = in_min
self._in_max = in_max
self._log.info('input range:\t{:>5.2f}-{:<5.2f}'.format(self._in_min, self._in_max))
# ..........................................................................
def set_output_limits(self, out_min, out_max):
self._out_min = out_min
self._out_max = out_max
self._log.info('output range:\t{:>5.2f}-{:<5.2f}'.format(self._out_min, self._out_max))
# ..........................................................................
def get_value(self):
value = self._max_value - self._ioe.input(self._pot_enc_c)
self._log.debug(Fore.BLACK + 'value: {:<5.2f}'.format(value))
return value
# ..........................................................................
def set_rgb(self, value):
h = value / self._max_value # time.time() / 10.0
r, g, b = [int(c * self._period * self._brightness) for c in colorsys.hsv_to_rgb(h, 1.0, 1.0)]
self._ioe.output(self._pin_red, r)
self._ioe.output(self._pin_green, g)
self._ioe.output(self._pin_blue, b)
self._log.debug('value: {:<5.2f}; rgb: {},{},{}'.format(value, r, g, b))
# ..........................................................................
def get_scaled_value(self, update_led=True):
'''
Return a scaled value while also updating the RGB LED if the
argument is True (the default).
'''
_value = self.get_value()
if update_led:
self.set_rgb(_value)
return self.scale_value(_value) # as float
# # ..........................................................................
# def x_get_scaled_value(self):
# '''
# (out_max - out_min)(value - in_min)
# f(x) = ----------------------------------- + out_min
# in_max - in_min
# where: a = 0.0, b = 1.0, min = 0, max = 330.
# '''
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# ..........................................................................
def scale_value(self, value):
'''
(out_max - out_min)(value - in_min)
f(x) = ----------------------------------- + out_min
in_max - in_min
where e.g.: a = 0.0, b = 1.0, min = 0, max = 330.
'''
return (( self._out_max - self._out_min ) * ( value - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
# return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min
#EOF
| en | 0.367042 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Copyright 2020-2021 by <NAME>. All rights reserved. This file is part # of the Robot Operating System project, released under the MIT License. Please # see the LICENSE file included as part of this package. # # author: <NAME> # created: 2020-09-19 # modified: 2020-09-19 # # .............................................................................. Configures an IO Expander Potentiometer breakout, returning an analog value scaled to a specified range. For a center-zero pot simply specify the minimum value as (-1.0 * out_max). # 0x18 for IO Expander, 0x0E for the potentiometer breakout # self._i2c_addr = 0x0E # maximum voltage (3.3v supply) # effectively max fraction of period LED will be on # add a period large enough to get 0-255 steps at the desired brightness # minimum analog value from IO Expander # maximum analog value from IO Expander # minimum scaled output value # maximum scaled output value # now configure IO Expander # PWM as fast as we can to avoid LED flicker # .......................................................................... # .......................................................................... # .......................................................................... # .......................................................................... # time.time() / 10.0 # .......................................................................... Return a scaled value while also updating the RGB LED if the argument is True (the default). # as float # # .......................................................................... # def x_get_scaled_value(self): # ''' # (out_max - out_min)(value - in_min) # f(x) = ----------------------------------- + out_min # in_max - in_min # where: a = 0.0, b = 1.0, min = 0, max = 330. # ''' # return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min # .......................................................................... (out_max - out_min)(value - in_min) f(x) = ----------------------------------- + out_min in_max - in_min where e.g.: a = 0.0, b = 1.0, min = 0, max = 330. # return (( self._out_max - self._out_min ) * ( self.get_value() - self._in_min ) / ( self._in_max - self._in_min )) + self._out_min #EOF | 2.607883 | 3 |
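The scale_value docstring above is just a linear map from the ADC input range onto the configured output range. A minimal standalone sketch of the same arithmetic, assuming example limits of in_min=0.0, in_max=3.3 (the 3.3 V supply) and out_min=0.0, out_max=1.0 (the class itself reads the real limits from its configuration):
def scale(value, in_min=0.0, in_max=3.3, out_min=0.0, out_max=1.0):
    # f(x) = (out_max - out_min) * (value - in_min) / (in_max - in_min) + out_min
    return (out_max - out_min) * (value - in_min) / (in_max - in_min) + out_min
print(scale(1.65))                             # midpoint in -> midpoint out: 0.5
print(scale(1.65, out_min=-1.0, out_max=1.0))  # centre-zero pot: 0.0
For a centre-zero potentiometer the only change is passing out_min = -1.0 * out_max, which is what the Potentiometer docstring recommends.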
stubs/micropython-esp32-1_12/urequests.py | RonaldHiemstra/micropython-stubs | 38 | 406 | """
Module: 'urequests' on esp32 1.12.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-20', machine='ESP32 module (spiram) with ESP32')
# Stubber: 1.3.2
class Response:
''
def close():
pass
content = None
def json():
pass
text = None
def delete():
pass
def get():
pass
def head():
pass
def patch():
pass
def post():
pass
def put():
pass
def request():
pass
usocket = None
| """
Module: 'urequests' on esp32 1.12.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-20', machine='ESP32 module (spiram) with ESP32')
# Stubber: 1.3.2
class Response:
''
def close():
pass
content = None
def json():
pass
text = None
def delete():
pass
def get():
pass
def head():
pass
def patch():
pass
def post():
pass
def put():
pass
def request():
pass
usocket = None
| en | 0.27241 | Module: 'urequests' on esp32 1.12.0 # MCU: (sysname='esp32', nodename='esp32', release='1.12.0', version='v1.12 on 2019-12-20', machine='ESP32 module (spiram) with ESP32') # Stubber: 1.3.2 | 1.678988 | 2 |
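The stub above only records the API surface of urequests; on a real ESP32 the module is used roughly as below. A hedged sketch that assumes the network connection is already up and uses a placeholder URL:
import urequests  # MicroPython's small HTTP client
def fetch_json(url):
    response = urequests.get(url)
    try:
        # Response exposes .text for the raw body and .json() for parsed JSON,
        # matching the attributes listed in the stub above
        return response.json()
    finally:
        # close() frees the underlying socket, which matters on a microcontroller
        response.close()
print(fetch_json('http://example.com/api/status'))  # placeholder URL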
python/re_user.py | seckcoder/lang-learn | 1 | 407 | <filename>python/re_user.py
#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: <EMAIL>
import re
from urlparse import urlparse
def parse1():
p = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)")
o = urlparse("http://weibo.com/2827699110/yz62AlEjF")
m = p.search(o.path)
print m.group('uid')
print m.group('mid')
def parse2():
exc_type_str = "<type 'exceptions.IndexError'>"
parse1()
| <filename>python/re_user.py
#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: <EMAIL>
import re
from urlparse import urlparse
def parse1():
p = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)")
o = urlparse("http://weibo.com/2827699110/yz62AlEjF")
m = p.search(o.path)
print m.group('uid')
print m.group('mid')
def parse2():
exc_type_str = "<type 'exceptions.IndexError'>"
parse1()
| en | 0.44418 | #!/usr/bin/env python #-*- coding=utf-8 -*- # # Copyright 2012 Jike Inc. All Rights Reserved. # Author: <EMAIL> | 3.10534 | 3 |
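The script above targets Python 2 (print statements, the urlparse module). A sketch of the same named-group parse under Python 3, assuming the URL keeps the /<uid>/<mid> layout shown in parse1:
import re
from urllib.parse import urlparse  # Python 3 location of urlparse
pattern = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)")
def parse_status_url(url):
    # the path of http://weibo.com/2827699110/yz62AlEjF is /2827699110/yz62AlEjF
    match = pattern.search(urlparse(url).path)
    if match is None:
        return None
    return match.group('uid'), match.group('mid')
print(parse_status_url("http://weibo.com/2827699110/yz62AlEjF"))  # ('2827699110', 'yz62AlEjF')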
TransactionBook/gui_kivy/generic/MultiSelectPopUp.py | LukHad/AccountBook | 0 | 408 | <reponame>LukHad/AccountBook<filename>TransactionBook/gui_kivy/generic/MultiSelectPopUp.py
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
import matplotlib.pyplot as plt
import matplotlib
import datetime
from TransactionBook.model.Filter import Filter
from datetime import datetime
from kivy.uix.popup import Popup
from kivy.properties import NumericProperty, ReferenceListProperty
from kivy.uix.checkbox import CheckBox
from kivy.core.window import Window
class MultiSelectPopUp(Popup):
pHint_x = NumericProperty(0.7)
pHint_y = NumericProperty(0.7)
pHint = ReferenceListProperty(pHint_x, pHint_y)
def __init__(self, title, option_list, option_init=None, callback=None, multiselect=True, **kwargs):
super().__init__(**kwargs)
self.title = title
self.callback = callback
self.main_layout = AnchorLayout()
if option_init is None:
option_init = [True] * len(option_list)
self.grid = GridLayout(cols=1)
self.opt_boxes = []
self.labels = []
for i, opt in enumerate(option_list):
box = BoxLayout(orientation='horizontal')
check_box = CheckBox(active=option_init[i])
if not multiselect:
check_box.group = "Single_Select_Only_Group"
label = Label(text=str(opt))
self.opt_boxes.append(check_box)
self.labels.append(label)
box.add_widget(check_box)
box.add_widget(label)
self.grid.add_widget(box)
cancel_button = Button(text="Cancel")
cancel_button.bind(on_press=self.cancel_callback)
ok_button = Button(text="Ok")
ok_button.bind(on_press=self.ok_callback)
box = BoxLayout(orientation='horizontal')
box.add_widget(cancel_button)
box.add_widget(ok_button)
self.grid.add_widget(box)
self.main_layout.add_widget(self.grid)
self.content = self.main_layout
self.size_hint = self.pHint
Window.release_all_keyboards()
self.open()
def ok_callback(self, _):
selection = []
for i, check_box in enumerate(self.opt_boxes):
if check_box.active:
selection.append(self.labels[i].text)
self.callback(selection)
self.dismiss()
def cancel_callback(self, _):
self.dismiss()
if __name__ == "__main__":
from kivy.base import runTouchApp
def cb(list_of_selection):
print(list_of_selection)
c = MultiSelectPopUp(title="Test", option_list=["Item1", "Item2", "Item3"], callback=cb, option_init=[True, False, True])
runTouchApp(c) | from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
import matplotlib.pyplot as plt
import matplotlib
import datetime
from TransactionBook.model.Filter import Filter
from datetime import datetime
from kivy.uix.popup import Popup
from kivy.properties import NumericProperty, ReferenceListProperty
from kivy.uix.checkbox import CheckBox
from kivy.core.window import Window
class MultiSelectPopUp(Popup):
pHint_x = NumericProperty(0.7)
pHint_y = NumericProperty(0.7)
pHint = ReferenceListProperty(pHint_x, pHint_y)
def __init__(self, title, option_list, option_init=None, callback=None, multiselect=True, **kwargs):
super().__init__(**kwargs)
self.title = title
self.callback = callback
self.main_layout = AnchorLayout()
if option_init is None:
option_init = [True] * len(option_list)
self.grid = GridLayout(cols=1)
self.opt_boxes = []
self.labels = []
for i, opt in enumerate(option_list):
box = BoxLayout(orientation='horizontal')
check_box = CheckBox(active=option_init[i])
if not multiselect:
check_box.group = "Single_Select_Only_Group"
label = Label(text=str(opt))
self.opt_boxes.append(check_box)
self.labels.append(label)
box.add_widget(check_box)
box.add_widget(label)
self.grid.add_widget(box)
cancel_button = Button(text="Cancel")
cancel_button.bind(on_press=self.cancel_callback)
ok_button = Button(text="Ok")
ok_button.bind(on_press=self.ok_callback)
box = BoxLayout(orientation='horizontal')
box.add_widget(cancel_button)
box.add_widget(ok_button)
self.grid.add_widget(box)
self.main_layout.add_widget(self.grid)
self.content = self.main_layout
self.size_hint = self.pHint
Window.release_all_keyboards()
self.open()
def ok_callback(self, _):
selection = []
for i, check_box in enumerate(self.opt_boxes):
if check_box.active:
selection.append(self.labels[i].text)
self.callback(selection)
self.dismiss()
def cancel_callback(self, _):
self.dismiss()
if __name__ == "__main__":
from kivy.base import runTouchApp
def cb(list_of_selection):
print(list_of_selection)
c = MultiSelectPopUp(title="Test", option_list=["Item1", "Item2", "Item3"], callback=cb, option_init=[True, False, True])
runTouchApp(c) | none | 1 | 2.528431 | 3 |
|
Assignment1/Part2/Bridge2.py | MormonJesus69420/Knowledge-Based-Systems-Project | 0 | 409 | <reponame>MormonJesus69420/Knowledge-Based-Systems-Project
from dataclasses import dataclass, field
from typing import List
from Car2 import Car
@dataclass
class Bridge:
"""Bridge class simulating the behaviour of bridge in simulation.
    One can set specific length and capacity for the bridge to change the overall
behaviour of bridge in the simulation and see how it impacts the scores for
cars.
"""
capacity: int = field(default=5)
"""Set amount of cars that the bridge can accommodate before collapsing."""
length: int = field(default=10)
"""Length of bridge deciding how much time a car will use to cross it."""
cars: List[Car] = field(default_factory=list, repr=False, init=False)
"""List of all of the cars that are currently on the bridge."""
def has_collapsed(self) -> bool:
"""Simple method to check if bridge has collapsed.
Returns:
bool: True if bridge has collapsed, False otherwise.
"""
return len(self.cars) > self.capacity
def move_cars(self) -> List[Car]:
""" Moves cars across the bridge and returns cars that have crossed it.
Returns:
List[Car]: List of cars that have crossed the bridge this turn.
"""
finished_cars = list()
for c in self.cars:
c.distance_on_bridge += c.speed
if c.distance_on_bridge >= self.length:
c.distance_on_bridge = 0
finished_cars.append(c)
self.cars = [c for c in self.cars if c not in finished_cars]
return finished_cars
def collapse_bridge(self) -> List[Car]:
"""Returns a list of all cars on bridge and sets cars to empty list.
Returns:
List[Car]: List of cars that were on bridge when it collapsed.
"""
temp = self.cars
for c in temp:
c.distance_on_bridge = 0
self.cars = list()
return temp
| from dataclasses import dataclass, field
from typing import List
from Car2 import Car
@dataclass
class Bridge:
"""Bridge class simulating the behaviour of bridge in simulation.
    One can set specific length and capacity for the bridge to change the overall
behaviour of bridge in the simulation and see how it impacts the scores for
cars.
"""
capacity: int = field(default=5)
"""Set amount of cars that the bridge can accommodate before collapsing."""
length: int = field(default=10)
"""Length of bridge deciding how much time a car will use to cross it."""
cars: List[Car] = field(default_factory=list, repr=False, init=False)
"""List of all of the cars that are currently on the bridge."""
def has_collapsed(self) -> bool:
"""Simple method to check if bridge has collapsed.
Returns:
bool: True if bridge has collapsed, False otherwise.
"""
return len(self.cars) > self.capacity
def move_cars(self) -> List[Car]:
""" Moves cars across the bridge and returns cars that have crossed it.
Returns:
List[Car]: List of cars that have crossed the bridge this turn.
"""
finished_cars = list()
for c in self.cars:
c.distance_on_bridge += c.speed
if c.distance_on_bridge >= self.length:
c.distance_on_bridge = 0
finished_cars.append(c)
self.cars = [c for c in self.cars if c not in finished_cars]
return finished_cars
def collapse_bridge(self) -> List[Car]:
"""Returns a list of all cars on bridge and sets cars to empty list.
Returns:
List[Car]: List of cars that were on bridge when it collapsed.
"""
temp = self.cars
for c in temp:
c.distance_on_bridge = 0
self.cars = list()
return temp | en | 0.953799 | Bridge class simulating the behaviour of bridge in simulation. On can set specific length and capacity for the bridge to change the overall behaviour of bridge in the simulation and see how it impacts the scores for cars. Set amount of cars that the bridge can accommodate before collapsing. Length of bridge deciding how much time a car will use to cross it. List of all of the cars that are currently on the bridge. Simple method to check if bridge has collapsed. Returns: bool: True if bridge has collapsed, False otherwise. Moves cars across the bridge and returns cars that have crossed it. Returns: List[Car]: List of cars that have crossed the bridge this turn. Returns a list of all cars on bridge and sets cars to empty list. Returns: List[Car]: List of cars that were on bridge when it collapsed. | 4.180892 | 4 |
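A minimal sketch of how the Bridge dataclass above is meant to be driven. The real Car class lives in Car2 and is not shown here, so this stand-in is an assumption carrying only the two attributes Bridge actually touches (speed and distance_on_bridge):
from dataclasses import dataclass
from Bridge2 import Bridge  # the dataclass defined above
@dataclass
class StandInCar:
    speed: int = 4
    distance_on_bridge: int = 0
bridge = Bridge(capacity=2, length=10)
bridge.cars = [StandInCar(speed=5), StandInCar(speed=3), StandInCar(speed=6)]
if bridge.has_collapsed():
    # three cars on a capacity-2 bridge: everything on it is lost
    wrecked = bridge.collapse_bridge()
    print("collapsed with", len(wrecked), "cars on it")
else:
    crossed = bridge.move_cars()
    print(len(crossed), "cars crossed this turn")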
lib/tuner_interface.py | jefflundberg/locast2plex | 0 | 410 | <reponame>jefflundberg/locast2plex
import subprocess
import threading
import time
import errno
import socket
import urllib
import pathlib
from io import StringIO
from http.server import BaseHTTPRequestHandler, HTTPServer
import lib.stations as stations
import lib.epg2xml as epg2xml
import lib.channels_m3u as channels_m3u
from lib.templates import templates
# with help from https://www.acmesystems.it/python_http
# and https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler
class PlexHttpHandler(BaseHTTPRequestHandler):
# using class variables since this should only be set once
config = None
hdhr_station_scan = False
rmg_station_scans = []
local_locast = None
location = None
def do_GET(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
# paths and logic mostly pulled from telly:routes.go: https://github.com/tellytv/telly
if (contentPath == '/') and (not self.config['main']['use_old_plex_interface']):
self.do_response(200,
'application/xml',
templates['xmlRmgIdentification'].format(self.config['main']['reporting_friendly_name']))
elif (contentPath == '/') or (contentPath == '/device.xml'):
templateName = 'xmlDiscover'
if self.config['main']['use_old_plex_interface']:
templateName = 'xmlDiscoverOld'
self.do_response(200,
'application/xml',
templates[templateName].format(self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['uuid'],
base_url))
elif contentPath == '/discover.json':
self.do_response(200,
'application/json',
templates['jsonDiscover'].format(self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['reporting_firmware_name'],
self.config['main']['tuner_count'],
self.config['main']['reporting_firmware_ver'],
self.config['main']['uuid'],
base_url))
elif contentPath == '/lineup_status.json':
if self.hdhr_station_scan:
returnJSON = templates['jsonLineupStatus']
else:
returnJSON = templates['jsonLineupComplete'].replace("Antenna", self.config['main']['tuner_type'])
self.do_response(200, 'application/json', returnJSON)
elif contentPath == '/lineup.json': # TODO
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
returnJSON = ''
for index, list_key in enumerate(station_list):
sid = str(list_key)
returnJSON = returnJSON + templates['jsonLineupItem'].format(station_list[sid]['channel'], station_list[sid]['friendlyName'], base_url + '/watch/' + sid)
if (index + 1) != len(station_list):
returnJSON = returnJSON + ','
returnJSON = "[" + returnJSON + "]"
self.do_response(200, 'application/json', returnJSON)
elif contentPath == '/lineup.xml': # TODO
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
returnXML = ''
for list_key in station_list:
sid = str(list_key)
returnXML = returnXML + templates['xmlLineupItem'].format(station_list[sid]['channel'], station_list[sid]['friendlyName'], base_url + '/watch/' + sid)
returnXML = "<Lineup>" + returnXML + "</Lineup>"
self.do_response(200, 'application/xml', returnXML)
elif contentPath.startswith('/watch'):
self.do_tuning(contentPath.replace('/watch/', ''))
elif contentPath.startswith('/auto/v'):
self.do_tuning(contentPath.replace('/auto/v', ''))
elif ((contentPath.startswith('/devices/' + self.config['main']['uuid'] + '/media/')) and
(not self.config['main']['use_old_plex_interface'])):
channel_no = contentPath.replace('/devices/' + self.config['main']['uuid'] + '/media/', '')
channel_no = urllib.parse.unquote(channel_no).replace('id://', '').replace('/', '')
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
for sid in station_list:
if station_list[sid]['channel'] == channel_no:
break
self.do_tuning(sid)
elif contentPath == '/xmltv.xml':
self.do_response(200, 'application/xml', epg2xml.get_epg(self.config, self.location))
elif contentPath == '/channels.m3u':
self.do_response(200, 'application/vnd.apple.mpegurl', channels_m3u.get_channels_m3u(self.config, self.location, base_url))
elif contentPath == '/debug.json':
self.do_response(200, 'application/json')
elif ((contentPath == '/devices/' + self.config['main']['uuid']) and
(not self.config['main']['use_old_plex_interface'])):
tuner_list = ""
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
tuner_list = tuner_list + templates['xmlRmgTunerIdle'].format(str(index))
elif scan_status == 'Scan':
tuner_list = tuner_list + templates['xmlRmgTunerScanning'].format(str(index))
else:
# otherwise, we're streaming, and the value will be the channel triplet
formatted_xml = templates['xmlRmgTunerStreaming'].format(str(index), scan_status)
tuner_list = tuner_list + formatted_xml
self.do_response(200,
'application/xml',
templates['xmlRmgDeviceIdentity'].format(self.config['main']['uuid'],
self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['tuner_count'],
base_url,
tuner_list))
elif((contentPath == '/devices/' + self.config['main']['uuid'] + '/channels') and
(not self.config['main']['use_old_plex_interface'])):
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
channelXML = ''
for index, list_key in enumerate(station_list):
sid = str(list_key)
tmpXML = templates['xmlRmgDeviceChannelItem'].format(station_list[sid]['channel'],
station_list[sid]['friendlyName'])
channelXML = channelXML + tmpXML
self.do_response(200, 'application/xml', templates['xmlRmgDeviceChannels'].format(index + 1, channelXML))
elif ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scanners') and
(not self.config['main']['use_old_plex_interface'])):
self.do_response(200, 'application/xml', templates['xmlRmgScanProviders'].format(self.location['city']))
else:
print("Unknown request to " + contentPath)
self.do_response(501, 'text/html', templates['htmlError'].format('501 - Not Implemented'))
return
def do_POST(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.headers.get('Content-Length') != '0':
postdata = self.rfile.read(int(self.headers.get('Content-Length')))
postdataElements = postdata.split('&')
for postdataItem in postdataElements:
postdataItemSplit = postdataItem.split('=')
if len(postdataItemSplit) > 1:
queryData[postdataItemSplit[0]] = postdataItemSplit[1]
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
if contentPath == '/lineup.post':
if queryData['scan'] == 'start':
self.hdhr_station_scan = True
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
self.rmg_station_scans[index] = "Scan"
self.do_response(200, 'text/html')
# putting this here after the response on purpose
            stations.refresh_dma_stations_and_channels(self.config, self.local_locast, self.location)
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
elif queryData['scan'] == 'abort':
self.do_response(200, 'text/html')
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
else:
print("Unknown scan command " + queryData['scan'])
self.do_response(400, 'text/html', templates['htmlError'].format(queryData['scan'] + ' is not a valid scan command'))
elif ((contentPath.startswith('/devices/discover') or contentPath.startswith('/devices/probe')) and
(not self.config['main']['use_old_plex_interface'])):
self.do_response(200,
'application/xml',
templates['xmlRmgDeviceDiscover'].format(self.config['main']['uuid'],
self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['tuner_count'],
base_url))
elif ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scan') and
(not self.config['main']['use_old_plex_interface'])):
self.hdhr_station_scan = True
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
self.rmg_station_scans[index] = "Scan"
self.do_response(200,
'application/xml',
templates['xmlRmgScanStatus'])
# putting this here after the response on purpose
stations.refresh_dma_stations_and_channels(self.config, self.local_locast, self.location)
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
else:
print("Unknown request to " + contentPath)
return
def do_DELETE(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.headers.get('Content-Length') != '0':
postdata = self.rfile.read(int(self.headers.get('Content-Length')))
postdataElements = postdata.split('&')
for postdataItem in postdataElements:
postdataItemSplit = postdataItem.split('=')
if len(postdataItemSplit) > 1:
queryData[postdataItemSplit[0]] = postdataItemSplit[1]
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
if ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scan') and
(not self.config['main']['use_old_plex_interface'])):
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
def do_tuning(self, sid):
channelUri = self.local_locast.get_station_stream_uri(sid)
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
tuner_found = False
# keep track of how many tuners we can use at a time
for index, scan_status in enumerate(self.rmg_station_scans):
# the first idle tuner gets it
if scan_status == 'Idle':
self.rmg_station_scans[index] = station_list[sid]['channel']
tuner_found = True
break
if tuner_found:
self.send_response(200)
            self.send_header('Content-type', 'video/mpeg; codecs="avc1.4D401E"')
self.end_headers()
ffmpeg_command = [self.config['main']['ffmpeg_path'],
"-i", channelUri,
"-c:v", "copy",
"-c:a", "copy",
"-f", "mpegts",
"-nostats", "-hide_banner",
"-loglevel", "warning",
"pipe:1"]
ffmpeg_proc = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)
# get initial videodata. if that works, then keep grabbing it
videoData = ffmpeg_proc.stdout.read(int(self.config['main']['bytes_per_read']))
while True:
if not videoData:
break
else:
# from https://stackoverflow.com/questions/9932332
try:
self.wfile.write(videoData)
time.sleep(0.1)
except IOError as e:
# Check we hit a broken pipe when trying to write back to the client
if e.errno in [errno.EPIPE, errno.ECONNABORTED, errno.ECONNRESET, errno.ECONNREFUSED]:
break
else:
raise
videoData = ffmpeg_proc.stdout.read(int(self.config['main']['bytes_per_read']))
# Send SIGTERM to shutdown ffmpeg
ffmpeg_proc.terminate()
try:
# ffmpeg writes a bit of data out to stderr after it terminates,
# need to read any hanging data to prevent a zombie process.
ffmpeg_proc.communicate()
except ValueError:
print("Connection Closed")
self.rmg_station_scans[index] = 'Idle'
else:
self.send_response(400, 'All tuners already in use.')
self.send_header('Content-type', 'text/html')
self.end_headers()
reply_str = templates['htmlError'].format('All tuners already in use.')
self.wfile.write(reply_str.encode('utf-8'))
    def do_response(self, code, mime, reply_str=None):
self.send_response(code)
self.send_header('Content-type', mime)
self.end_headers()
if reply_str:
self.wfile.write(reply_str.encode('utf-8'))
# mostly from https://github.com/ZeWaren/python-upnp-ssdp-example
# and https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port
class PlexHttpServer(threading.Thread):
def __init__(self, serverSocket, config, locast_service, location):
threading.Thread.__init__(self)
PlexHttpHandler.config = config
self.bind_ip = config["main"]["bind_ip"]
self.bind_port = config["main"]["bind_port"]
PlexHttpHandler.stations = stations
PlexHttpHandler.local_locast = locast_service
PlexHttpHandler.location = location
# init station scans
tmp_rmg_scans = []
for x in range(int(config['main']['tuner_count'])):
tmp_rmg_scans.append('Idle')
PlexHttpHandler.rmg_station_scans = tmp_rmg_scans
self.socket = serverSocket
self.daemon = True
self.start()
def run(self):
httpd = HTTPServer((self.bind_ip, int(self.bind_port)), PlexHttpHandler, False)
httpd.socket = self.socket
httpd.server_bind = self.server_close = lambda self: None
httpd.serve_forever()
def start(config, locast, location):
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind((config["main"]['bind_ip'], int(config["main"]['bind_port'])))
serverSocket.listen(int(config["main"]["concurrent_listeners"]))
print("Now listening for requests.")
for i in range(int(config["main"]["concurrent_listeners"])):
PlexHttpServer(serverSocket, config, locast, location)
| import subprocess
import threading
import time
import errno
import socket
import urllib
import pathlib
from io import StringIO
from http.server import BaseHTTPRequestHandler, HTTPServer
import lib.stations as stations
import lib.epg2xml as epg2xml
import lib.channels_m3u as channels_m3u
from lib.templates import templates
# with help from https://www.acmesystems.it/python_http
# and https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler
class PlexHttpHandler(BaseHTTPRequestHandler):
# using class variables since this should only be set once
config = None
hdhr_station_scan = False
rmg_station_scans = []
local_locast = None
location = None
def do_GET(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
# paths and logic mostly pulled from telly:routes.go: https://github.com/tellytv/telly
if (contentPath == '/') and (not self.config['main']['use_old_plex_interface']):
self.do_response(200,
'application/xml',
templates['xmlRmgIdentification'].format(self.config['main']['reporting_friendly_name']))
elif (contentPath == '/') or (contentPath == '/device.xml'):
templateName = 'xmlDiscover'
if self.config['main']['use_old_plex_interface']:
templateName = 'xmlDiscoverOld'
self.do_response(200,
'application/xml',
templates[templateName].format(self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['uuid'],
base_url))
elif contentPath == '/discover.json':
self.do_response(200,
'application/json',
templates['jsonDiscover'].format(self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['reporting_firmware_name'],
self.config['main']['tuner_count'],
self.config['main']['reporting_firmware_ver'],
self.config['main']['uuid'],
base_url))
elif contentPath == '/lineup_status.json':
if self.hdhr_station_scan:
returnJSON = templates['jsonLineupStatus']
else:
returnJSON = templates['jsonLineupComplete'].replace("Antenna", self.config['main']['tuner_type'])
self.do_response(200, 'application/json', returnJSON)
elif contentPath == '/lineup.json': # TODO
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
returnJSON = ''
for index, list_key in enumerate(station_list):
sid = str(list_key)
returnJSON = returnJSON + templates['jsonLineupItem'].format(station_list[sid]['channel'], station_list[sid]['friendlyName'], base_url + '/watch/' + sid)
if (index + 1) != len(station_list):
returnJSON = returnJSON + ','
returnJSON = "[" + returnJSON + "]"
self.do_response(200, 'application/json', returnJSON)
elif contentPath == '/lineup.xml': # TODO
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
returnXML = ''
for list_key in station_list:
sid = str(list_key)
returnXML = returnXML + templates['xmlLineupItem'].format(station_list[sid]['channel'], station_list[sid]['friendlyName'], base_url + '/watch/' + sid)
returnXML = "<Lineup>" + returnXML + "</Lineup>"
self.do_response(200, 'application/xml', returnXML)
elif contentPath.startswith('/watch'):
self.do_tuning(contentPath.replace('/watch/', ''))
elif contentPath.startswith('/auto/v'):
self.do_tuning(contentPath.replace('/auto/v', ''))
elif ((contentPath.startswith('/devices/' + self.config['main']['uuid'] + '/media/')) and
(not self.config['main']['use_old_plex_interface'])):
channel_no = contentPath.replace('/devices/' + self.config['main']['uuid'] + '/media/', '')
channel_no = urllib.parse.unquote(channel_no).replace('id://', '').replace('/', '')
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
for sid in station_list:
if station_list[sid]['channel'] == channel_no:
break
self.do_tuning(sid)
elif contentPath == '/xmltv.xml':
self.do_response(200, 'application/xml', epg2xml.get_epg(self.config, self.location))
elif contentPath == '/channels.m3u':
self.do_response(200, 'application/vnd.apple.mpegurl', channels_m3u.get_channels_m3u(self.config, self.location, base_url))
elif contentPath == '/debug.json':
self.do_response(200, 'application/json')
elif ((contentPath == '/devices/' + self.config['main']['uuid']) and
(not self.config['main']['use_old_plex_interface'])):
tuner_list = ""
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
tuner_list = tuner_list + templates['xmlRmgTunerIdle'].format(str(index))
elif scan_status == 'Scan':
tuner_list = tuner_list + templates['xmlRmgTunerScanning'].format(str(index))
else:
# otherwise, we're streaming, and the value will be the channel triplet
formatted_xml = templates['xmlRmgTunerStreaming'].format(str(index), scan_status)
tuner_list = tuner_list + formatted_xml
self.do_response(200,
'application/xml',
templates['xmlRmgDeviceIdentity'].format(self.config['main']['uuid'],
self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['tuner_count'],
base_url,
tuner_list))
elif((contentPath == '/devices/' + self.config['main']['uuid'] + '/channels') and
(not self.config['main']['use_old_plex_interface'])):
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
channelXML = ''
for index, list_key in enumerate(station_list):
sid = str(list_key)
tmpXML = templates['xmlRmgDeviceChannelItem'].format(station_list[sid]['channel'],
station_list[sid]['friendlyName'])
channelXML = channelXML + tmpXML
self.do_response(200, 'application/xml', templates['xmlRmgDeviceChannels'].format(index + 1, channelXML))
elif ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scanners') and
(not self.config['main']['use_old_plex_interface'])):
self.do_response(200, 'application/xml', templates['xmlRmgScanProviders'].format(self.location['city']))
else:
print("Unknown request to " + contentPath)
self.do_response(501, 'text/html', templates['htmlError'].format('501 - Not Implemented'))
return
def do_POST(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.headers.get('Content-Length') != '0':
postdata = self.rfile.read(int(self.headers.get('Content-Length')))
postdataElements = postdata.split('&')
for postdataItem in postdataElements:
postdataItemSplit = postdataItem.split('=')
if len(postdataItemSplit) > 1:
queryData[postdataItemSplit[0]] = postdataItemSplit[1]
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
if contentPath == '/lineup.post':
if queryData['scan'] == 'start':
self.hdhr_station_scan = True
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
self.rmg_station_scans[index] = "Scan"
self.do_response(200, 'text/html')
# putting this here after the response on purpose
            stations.refresh_dma_stations_and_channels(self.config, self.local_locast, self.location)
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
elif queryData['scan'] == 'abort':
self.do_response(200, 'text/html')
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
else:
print("Unknown scan command " + queryData['scan'])
self.do_response(400, 'text/html', templates['htmlError'].format(queryData['scan'] + ' is not a valid scan command'))
elif ((contentPath.startswith('/devices/discover') or contentPath.startswith('/devices/probe')) and
(not self.config['main']['use_old_plex_interface'])):
self.do_response(200,
'application/xml',
templates['xmlRmgDeviceDiscover'].format(self.config['main']['uuid'],
self.config['main']['reporting_friendly_name'],
self.config['main']['reporting_model'],
self.config['main']['tuner_count'],
base_url))
elif ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scan') and
(not self.config['main']['use_old_plex_interface'])):
self.hdhr_station_scan = True
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Idle':
self.rmg_station_scans[index] = "Scan"
self.do_response(200,
'application/xml',
templates['xmlRmgScanStatus'])
# putting this here after the response on purpose
stations.refresh_dma_stations_and_channels(self.config, self.local_locast, self.location)
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
else:
print("Unknown request to " + contentPath)
return
def do_DELETE(self):
base_url = self.config['main']['plex_accessible_ip'] + ':' + self.config['main']['plex_accessible_port']
contentPath = self.path
queryData = {}
if self.headers.get('Content-Length') != '0':
postdata = self.rfile.read(int(self.headers.get('Content-Length')))
postdataElements = postdata.split('&')
for postdataItem in postdataElements:
postdataItemSplit = postdataItem.split('=')
if len(postdataItemSplit) > 1:
queryData[postdataItemSplit[0]] = postdataItemSplit[1]
if self.path.find('?') != -1:
contentPath = self.path[0:self.path.find('?')]
getdata = self.path[(self.path.find('?') + 1):]
getdataElements = getdata.split('&')
for getdataItem in getdataElements:
getdataItemSplit = getdataItem.split('=')
if len(getdataItemSplit) > 1:
queryData[getdataItemSplit[0]] = getdataItemSplit[1]
if ((contentPath == '/devices/' + self.config['main']['uuid'] + '/scan') and
(not self.config['main']['use_old_plex_interface'])):
self.hdhr_station_scan = False
for index, scan_status in enumerate(self.rmg_station_scans):
if scan_status == 'Scan':
self.rmg_station_scans[index] = "Idle"
def do_tuning(self, sid):
channelUri = self.local_locast.get_station_stream_uri(sid)
station_list = stations.get_dma_stations_and_channels(self.config, self.location)
tuner_found = False
# keep track of how many tuners we can use at a time
for index, scan_status in enumerate(self.rmg_station_scans):
# the first idle tuner gets it
if scan_status == 'Idle':
self.rmg_station_scans[index] = station_list[sid]['channel']
tuner_found = True
break
if tuner_found:
self.send_response(200)
            self.send_header('Content-type', 'video/mpeg; codecs="avc1.4D401E"')
self.end_headers()
ffmpeg_command = [self.config['main']['ffmpeg_path'],
"-i", channelUri,
"-c:v", "copy",
"-c:a", "copy",
"-f", "mpegts",
"-nostats", "-hide_banner",
"-loglevel", "warning",
"pipe:1"]
ffmpeg_proc = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)
# get initial videodata. if that works, then keep grabbing it
videoData = ffmpeg_proc.stdout.read(int(self.config['main']['bytes_per_read']))
while True:
if not videoData:
break
else:
# from https://stackoverflow.com/questions/9932332
try:
self.wfile.write(videoData)
time.sleep(0.1)
except IOError as e:
# Check we hit a broken pipe when trying to write back to the client
if e.errno in [errno.EPIPE, errno.ECONNABORTED, errno.ECONNRESET, errno.ECONNREFUSED]:
break
else:
raise
videoData = ffmpeg_proc.stdout.read(int(self.config['main']['bytes_per_read']))
# Send SIGTERM to shutdown ffmpeg
ffmpeg_proc.terminate()
try:
# ffmpeg writes a bit of data out to stderr after it terminates,
# need to read any hanging data to prevent a zombie process.
ffmpeg_proc.communicate()
except ValueError:
print("Connection Closed")
self.rmg_station_scans[index] = 'Idle'
else:
self.send_response(400, 'All tuners already in use.')
self.send_header('Content-type', 'text/html')
self.end_headers()
reply_str = templates['htmlError'].format('All tuners already in use.')
self.wfile.write(reply_str.encode('utf-8'))
    def do_response(self, code, mime, reply_str=None):
self.send_response(code)
self.send_header('Content-type', mime)
self.end_headers()
if reply_str:
self.wfile.write(reply_str.encode('utf-8'))
# mostly from https://github.com/ZeWaren/python-upnp-ssdp-example
# and https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port
class PlexHttpServer(threading.Thread):
def __init__(self, serverSocket, config, locast_service, location):
threading.Thread.__init__(self)
PlexHttpHandler.config = config
self.bind_ip = config["main"]["bind_ip"]
self.bind_port = config["main"]["bind_port"]
PlexHttpHandler.stations = stations
PlexHttpHandler.local_locast = locast_service
PlexHttpHandler.location = location
# init station scans
tmp_rmg_scans = []
for x in range(int(config['main']['tuner_count'])):
tmp_rmg_scans.append('Idle')
PlexHttpHandler.rmg_station_scans = tmp_rmg_scans
self.socket = serverSocket
self.daemon = True
self.start()
def run(self):
httpd = HTTPServer((self.bind_ip, int(self.bind_port)), PlexHttpHandler, False)
httpd.socket = self.socket
httpd.server_bind = self.server_close = lambda self: None
httpd.serve_forever()
def start(config, locast, location):
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serverSocket.bind((config["main"]['bind_ip'], int(config["main"]['bind_port'])))
serverSocket.listen(int(config["main"]["concurrent_listeners"]))
print("Now listening for requests.")
for i in range(int(config["main"]["concurrent_listeners"])):
PlexHttpServer(serverSocket, config, locast, location) | en | 0.822147 | # with help from https://www.acmesystems.it/python_http # and https://stackoverflow.com/questions/21631799/how-can-i-pass-parameters-to-a-requesthandler # using class variables since this should only be set once # paths and logic mostly pulled from telly:routes.go: https://github.com/tellytv/telly # TODO # TODO # otherwise, we're streaming, and the value will be the channel triplet # putting this here after the response on purpose # putting this here after the response on purpose # keep track of how many tuners we can use at a time # the first idle tuner gets it # get initial videodata. if that works, then keep grabbing it # from https://stackoverflow.com/questions/9932332 # Check we hit a broken pipe when trying to write back to the client # Send SIGTERM to shutdown ffmpeg # ffmpeg writes a bit of data out to stderr after it terminates, # need to read any hanging data to prevent a zombie process. # mostly from https://github.com/ZeWaren/python-upnp-ssdp-example # and https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port # init station scans | 2.305295 | 2 |
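Because the handler above emulates an HDHomeRun-style tuner, the quickest sanity check of a running instance is to hit its discovery endpoints over plain HTTP. A sketch using only the standard library, assuming the server listens on 127.0.0.1:6077 (substitute whatever bind/plex_accessible address and port are configured):
import json
import urllib.request
BASE = "http://127.0.0.1:6077"  # assumed address and port; adjust to your config
def get(path):
    with urllib.request.urlopen(BASE + path) as resp:
        return resp.read().decode("utf-8")
print(json.loads(get("/discover.json")))      # device metadata from the jsonDiscover template
for channel in json.loads(get("/lineup.json")):
    print(channel)                            # one entry per station with its /watch/<sid> URL
print(get("/lineup_status.json"))             # scanning state driven by /lineup.post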
{{ cookiecutter.project_name }}/{{ cookiecutter.project_name }}/local/pages/views.py | dcs3spp/cookiecutter-django-api | 0 | 411 | from django import template
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
@login_required(login_url="/login/")
def index(request):
context = {}
context["segment"] = "index"
html_template = loader.get_template("index.html")
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
load_template = request.path.split("/")[-1]
context["segment"] = load_template
html_template = loader.get_template(load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template("page-404.html")
return HttpResponse(html_template.render(context, request))
except: # noqa: E722
html_template = loader.get_template("page-500.html")
return HttpResponse(html_template.render(context, request))
| from django import template
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
@login_required(login_url="/login/")
def index(request):
context = {}
context["segment"] = "index"
html_template = loader.get_template("index.html")
return HttpResponse(html_template.render(context, request))
@login_required(login_url="/login/")
def pages(request):
context = {}
# All resource paths end in .html.
# Pick out the html file name from the url. And load that template.
try:
load_template = request.path.split("/")[-1]
context["segment"] = load_template
html_template = loader.get_template(load_template)
return HttpResponse(html_template.render(context, request))
except template.TemplateDoesNotExist:
html_template = loader.get_template("page-404.html")
return HttpResponse(html_template.render(context, request))
except: # noqa: E722
html_template = loader.get_template("page-500.html")
return HttpResponse(html_template.render(context, request))
| en | 0.636507 | # All resource paths end in .html. # Pick out the html file name from the url. And load that template. # noqa: E722 | 2.413016 | 2 |
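These views only work if the URL configuration routes the site root to index() and every *.html path to pages(). A sketch of such a urls.py; the import path of the views module is an assumption (in this template the module sits under local/pages/), so adjust it to wherever the app lives in the project:
from django.urls import path, re_path
from pages import views  # assumed import path for the views module above
urlpatterns = [
    path("", views.index, name="home"),
    # pages() loads the template named after the last URL segment,
    # falling back to page-404.html when it does not exist
    re_path(r"^.+\.html$", views.pages, name="pages"),
]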
audiomentations/core/utils.py | jeongyoonlee/audiomentations | 1 | 412 | <reponame>jeongyoonlee/audiomentations<filename>audiomentations/core/utils.py
import os
from pathlib import Path
import numpy as np
AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav")
def get_file_paths(
root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True
):
"""Return a list of paths to all files with the given filename extensions in a directory.
Also traverses subdirectories by default.
"""
file_paths = []
for root, dirs, filenames in os.walk(root_path):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.lower().endswith(filename_endings):
file_paths.append(Path(file_path))
if not traverse_subdirectories:
# prevent descending into subfolders
break
return file_paths
def calculate_rms(samples):
"""Given a numpy array of audio samples, return its Root Mean Square (RMS)."""
return np.sqrt(np.mean(np.square(samples), axis=-1))
def calculate_desired_noise_rms(clean_rms, snr):
"""
Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR),
calculate the desired RMS of a noise sound to be mixed in.
Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
:param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
:param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
:return:
"""
a = float(snr) / 20
noise_rms = clean_rms / (10 ** a)
return noise_rms
def convert_decibels_to_amplitude_ratio(decibels):
return 10 ** (decibels / 20)
def is_waveform_multichannel(samples):
"""
Return bool that answers the question: Is the given ndarray a multichannel waveform or not?
:param samples: numpy ndarray
:return:
"""
return len(samples.shape) > 1
def is_spectrogram_multichannel(spectrogram):
"""
Return bool that answers the question: Is the given ndarray a multichannel spectrogram?
    :param spectrogram: numpy ndarray
:return:
"""
return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1
def convert_float_samples_to_int16(y):
"""Convert floating-point numpy array of audio samples to int16."""
if not issubclass(y.dtype.type, np.floating):
raise ValueError("input samples not floating-point")
return (y * np.iinfo(np.int16).max).astype(np.int16)
| import os
from pathlib import Path
import numpy as np
AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav")
def get_file_paths(
root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True
):
"""Return a list of paths to all files with the given filename extensions in a directory.
Also traverses subdirectories by default.
"""
file_paths = []
for root, dirs, filenames in os.walk(root_path):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.lower().endswith(filename_endings):
file_paths.append(Path(file_path))
if not traverse_subdirectories:
# prevent descending into subfolders
break
return file_paths
def calculate_rms(samples):
"""Given a numpy array of audio samples, return its Root Mean Square (RMS)."""
return np.sqrt(np.mean(np.square(samples), axis=-1))
def calculate_desired_noise_rms(clean_rms, snr):
"""
Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR),
calculate the desired RMS of a noise sound to be mixed in.
Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
:param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
:param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
:return:
"""
a = float(snr) / 20
noise_rms = clean_rms / (10 ** a)
return noise_rms
def convert_decibels_to_amplitude_ratio(decibels):
return 10 ** (decibels / 20)
def is_waveform_multichannel(samples):
"""
Return bool that answers the question: Is the given ndarray a multichannel waveform or not?
:param samples: numpy ndarray
:return:
"""
return len(samples.shape) > 1
def is_spectrogram_multichannel(spectrogram):
"""
Return bool that answers the question: Is the given ndarray a multichannel spectrogram?
    :param spectrogram: numpy ndarray
:return:
"""
return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1
def convert_float_samples_to_int16(y):
"""Convert floating-point numpy array of audio samples to int16."""
if not issubclass(y.dtype.type, np.floating):
raise ValueError("input samples not floating-point")
return (y * np.iinfo(np.int16).max).astype(np.int16) | en | 0.742636 | Return a list of paths to all files with the given filename extensions in a directory. Also traverses subdirectories by default. # prevent descending into subfolders Given a numpy array of audio samples, return its Root Mean Square (RMS). Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR), calculate the desired RMS of a noise sound to be mixed in. Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20 :param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0 :param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60 :return: Return bool that answers the question: Is the given ndarray a multichannel waveform or not? :param samples: numpy ndarray :return: Return bool that answers the question: Is the given ndarray a multichannel spectrogram? :param samples: numpy ndarray :return: Convert floating-point numpy array of audio samples to int16. | 3.194293 | 3 |
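A quick numeric check of the SNR helper above: scale a noise signal to the RMS returned by calculate_desired_noise_rms and the measured SNR of the pair lands on the requested value. The import path matches this file's location in the package:
import numpy as np
from audiomentations.core.utils import calculate_rms, calculate_desired_noise_rms
rng = np.random.default_rng(0)
clean = rng.uniform(-0.5, 0.5, 44100).astype(np.float32)
noise = rng.uniform(-0.5, 0.5, 44100).astype(np.float32)
target_snr_db = 12.0
# rescale the noise so that 20*log10(clean_rms / noise_rms) equals target_snr_db
desired_noise_rms = calculate_desired_noise_rms(calculate_rms(clean), target_snr_db)
noise = noise * (desired_noise_rms / calculate_rms(noise))
measured_snr_db = 20 * np.log10(calculate_rms(clean) / calculate_rms(noise))
print(round(float(measured_snr_db), 3))  # -> 12.0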
algorithm/python/LeetCode/isValid.py | HoneyS2/meaningful | 0 | 413 | <reponame>HoneyS2/meaningful
s = "([}}])"
stack = []
if len(s) % 2 == 1:
print(False)
exit()
for i in s:
if i == "(":
stack.append("(")
elif i == "[":
stack.append("[")
elif i == "{":
stack.append("{")
elif i == ")":
if len(stack) < 1:
print(False)
exit()
if stack[-1] == "(":
stack.pop()
else:
print(False)
exit()
elif i == "]":
if len(stack) < 1:
print(False)
exit()
if stack[-1] == "[":
stack.pop()
else:
print(False)
exit()
elif i == "}":
if len(stack) < 1:
print(False)
exit()
if stack[-1] == "{":
stack.pop()
else:
print(False)
exit()
if len(stack) == 0:
print(True)
else:
print(False)
| s = "([}}])"
stack = []
if len(s) % 2 == 1:
print(False)
exit()
for i in s:
if i == "(":
stack.append("(")
elif i == "[":
stack.append("[")
elif i == "{":
stack.append("{")
elif i == ")":
if len(stack) < 1:
print(False)
exit()
if stack[-1] == "(":
stack.pop()
else:
print(False)
exit()
elif i == "]":
if len(stack) < 1:
print(False)
exit()
if stack[-1] == "[":
stack.pop()
else:
print(False)
exit()
elif i == "}":
if len(stack) < 1:
print(False)
exit()
if stack[-1] == "{":
stack.pop()
else:
print(False)
exit()
if len(stack) == 0:
print(True)
else:
print(False) | none | 1 | 3.718874 | 4 |
|
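The solution above spells every bracket case out by hand; the same stack-based matching is usually written with a closing-to-opening lookup table. A compact sketch of the equivalent logic, keeping the early exit on odd-length input:
def is_valid(s):
    pairs = {')': '(', ']': '[', '}': '{'}  # each closer maps to the opener it must match
    if len(s) % 2 == 1:
        return False
    stack = []
    for ch in s:
        if ch in pairs.values():   # opening bracket: remember it
            stack.append(ch)
        elif ch in pairs:          # closing bracket: must match the most recent opener
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack               # anything still open means the string is invalid
print(is_valid("([}}])"))  # -> False, the same verdict the script above prints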
JupyterHTMLSlides/core.py | williamegomezo/JupyterSlides | 1 | 414 | import random
import string
import os
from IPython.display import display, HTML
from .utils import html_loader
from .utils import get_content
from jinja2 import Template
class JupyterSlides:
def __init__(
self,
content_path='./content.yaml',
table_contents=False
):
self.set_base_dirs()
self.set_source_dirs()
self.content = get_content(content_path)
self.render_init_templates()
if table_contents:
self.render_table_contents()
def set_base_dirs(self):
self.module_path = os.path.dirname(os.path.realpath(__file__))
self.base_template_dir = f'{self.module_path}/src/templates'
self.base_css_dir = f'{self.module_path}/src/assets/css'
self.base_js_dir = f'{self.module_path}/src/js'
def set_source_dirs(self):
self.called_from_path = os.getcwd()
folders = self.called_from_path.split('/')
self.source_path = '/'.join(folders[:folders.index('talks')])
self.template_dir = f'{self.source_path}/src/templates'
self.css_dir = f'{self.source_path}/src/css'
self.js_dir = f'{self.source_path}/src/js'
def render_init_templates(self):
self.render(
template='init',
data={'dir': self.module_path},
template_dir=self.base_template_dir
)
if os.path.isfile(f'{self.template_dir}/init.html'):
self.render(
template=f'init',
data=self.content.get('init_vars', {})
)
id = JupyterSlides.randomUUID()
self.render(
template='eye',
data={'eye_id': id},
template_dir=self.base_template_dir
)
def render_table_contents(self):
if os.path.isfile(f'{self.template_dir}/table-contents.html'):
contents_template_dir = self.template_dir
else:
contents_template_dir = self.base_template_dir
self.render(
template='table-contents',
data=self.generate_table_contents(),
template_dir=contents_template_dir,
render_type='slide'
)
def parse_template(self, template=None, data={}, template_dir=None, render_type=None):
if not template_dir:
if os.path.isfile(f'{self.template_dir}/{template}.html'):
html = html_loader(f'file:{self.template_dir}/{template}.html')
else:
template = 'basic-slide'
html = html_loader(f'file:{self.base_template_dir}/{template}.html')
else:
if not os.path.isfile(f'{template_dir}/{template}.html'):
template = 'basic-slide'
template_dir = self.base_template_dir
html = html_loader(
f'file:{template_dir}/{template}.html')
if render_type == 'slide':
html = '<div id="{{ data["slide_id"] }}" class="slide-container">' + \
html + '</div>'
tm = Template(html)
return tm.render(data=data)
def render(self, template=None, data={}, navigation=False, template_dir=None, render_type=None):
html = self.parse_template(
template=template,
data=data,
template_dir=template_dir,
render_type=render_type
)
if navigation:
navigation_template = self.parse_template(
template='navigation',
template_dir=template_dir
)
html += navigation_template
display(HTML(html))
def render_content(self, key):
data = self.content.get(key)
id = JupyterSlides.randomUUID()
self.render(
template='eye',
data={'eye_id': id},
template_dir=self.base_template_dir
)
if data.get('slides'):
for el in data.get('slides'):
template = el.get('template')
self.render(template=template, data=el, render_type='slide')
@staticmethod
def randomUUID(stringLength=20):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def generate_table_contents(self):
table = {}
items = []
for _, item in self.content.items():
for sub_item in item['slides']:
sub_item['slide_id'] = \
str(item['indice']) + '.' + str(sub_item['indice']) +\
sub_item['content_title']
item['slide_id'] = item['slides'][0]['slide_id']
items.append(item)
table['title'] = 'Table of Contents'
table['eyebrow'] = 'Table of Contents'
table['items'] = items
return table
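# Illustrative usage from a notebook cell (a sketch only; it assumes a
# content.yaml describing the slides and that the notebook lives below a
# 'talks' directory, as required by set_source_dirs; the 'intro' key is
# hypothetical):
#   slides = JupyterSlides(content_path='./content.yaml', table_contents=True)
#   slides.render_content('intro')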
# tests/test_wallet.py (NickeZ/lightning)
from decimal import Decimal
from fixtures import * # noqa: F401,F403
from fixtures import TEST_NETWORK
from flaky import flaky # noqa: F401
from pyln.client import RpcError, Millisatoshi
from utils import (
only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT,
VALGRIND
)
import os
import pytest
import subprocess
import time
import unittest
@unittest.skipIf(TEST_NETWORK != 'regtest', "Test relies on a number of example addresses valid only in regtest")
def test_withdraw(node_factory, bitcoind):
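    """Fund the wallet from bitcoind and exercise `withdraw`: bad arguments,
    P2WPKH and P2WSH destinations, the invalid bech32 strings from BIP173,
    'all', and an explicit utxo set."""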
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
l2 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Reach around into the db to check that outputs were added
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10
waddr = l1.bitcoin.rpc.getnewaddress()
# Now attempt to withdraw some (making sure we collect multiple inputs)
with pytest.raises(RpcError):
l1.rpc.withdraw('not an address', amount)
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, 'not an amount')
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, -amount)
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, amount * 100)
out = l1.rpc.withdraw(waddr, 2 * amount)
# Make sure bitcoind received the withdrawal
unspent = l1.bitcoin.rpc.listunspent(0)
withdrawal = [u for u in unspent if u['txid'] == out['txid']]
assert(withdrawal[0]['amount'] == Decimal('0.02'))
l1.bitcoin.generate_block(1)
sync_blockheight(l1.bitcoin, [l1])
# Check that there are no unconfirmed outputs (change should be confirmed)
for o in l1.rpc.listfunds()['outputs']:
assert o['status'] == 'confirmed'
# Now make sure two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2
    # Now send some money to l2, to a bech32 (P2WPKH) address.
waddr = l2.rpc.newaddr('bech32')['bech32']
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Make sure l2 received the withdrawal.
wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 2 * amount
# Now make sure an additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4
# Simple test for withdrawal to P2WPKH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6
# Simple test for withdrawal to P2WSH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8
# failure testing for invalid SegWit addresses, from BIP173
# HRP character out of range
with pytest.raises(RpcError):
l1.rpc.withdraw(' 1nwldj5', 2 * amount)
# overall max length exceeded
with pytest.raises(RpcError):
l1.rpc.withdraw('an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx', 2 * amount)
# No separator character
with pytest.raises(RpcError):
l1.rpc.withdraw('pzry9x0s0muk', 2 * amount)
# Empty HRP
with pytest.raises(RpcError):
l1.rpc.withdraw('1pzry9x0s0muk', 2 * amount)
# Invalid witness version
with pytest.raises(RpcError):
l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2', 2 * amount)
# Invalid program length for witness version 0 (per BIP141)
with pytest.raises(RpcError):
l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', 2 * amount)
# Mixed case
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7', 2 * amount)
# Non-zero padding in 8-to-5 conversion
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv', 2 * amount)
# Should have 6 outputs available.
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 6
# Test withdrawal to self.
l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
bitcoind.generate_block(1)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1
l1.rpc.withdraw(waddr, 'all', minconf=0)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0
# This should fail, can't even afford fee.
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, 'all')
# Add some funds to withdraw
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
withdrawal = l1.rpc.withdraw(waddr, 2 * amount, utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(withdrawal['tx'])
assert decode['txid'] == withdrawal['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
def test_minconf_withdraw(node_factory, bitcoind):
"""Issue 2518: ensure that ridiculous confirmation levels don't overflow
The number of confirmations is used to compute a maximum height that is to
be accepted. If the current height is smaller than the number of
confirmations we wrap around and just select everything. The fix is to
clamp the maxheight parameter to a positive small number.
"""
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
with pytest.raises(RpcError):
l1.rpc.withdraw(destination=addr, satoshi=10000, feerate='normal', minconf=9999999)
def test_addfunds_from_block(node_factory, bitcoind):
"""Send funds to the daemon without telling it explicitly
"""
# Previous runs with same bitcoind can leave funds!
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 0.1)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 10000000
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
# Send all our money to a P2WPKH address this time.
addr = l1.rpc.newaddr("bech32")['bech32']
l1.rpc.withdraw(addr, "all")
bitcoind.generate_block(1)
time.sleep(1)
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
@unittest.skipIf(not COMPAT, "needs COMPAT=1")
def test_deprecated_txprepare(node_factory, bitcoind):
"""Test the deprecated old-style:
txprepare {destination} {satoshi} {feerate} {minconf}
"""
amount = 10**4
l1 = node_factory.get_node(options={'allow-deprecated-apis': True})
addr = l1.rpc.newaddr()['bech32']
for i in range(7):
l1.fundwallet(10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 7)
# Array type
with pytest.raises(RpcError, match=r'.* should be an amount in satoshis or all, not .*'):
l1.rpc.call('txprepare', [addr, 'slow'])
with pytest.raises(RpcError, match=r'Need set \'satoshi\' field.'):
l1.rpc.call('txprepare', [addr])
with pytest.raises(RpcError, match=r'Could not parse destination address.*'):
l1.rpc.call('txprepare', [Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'normal'])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), None, 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100)])
# Object type
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'destination': addr, 'feerate': 'slow'})
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'satoshi': Millisatoshi(amount * 100), 'feerate': '10perkw', 'minconf': 2})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw', 'minconf': 1})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw'})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100)})
def test_txprepare_multi(node_factory, bitcoind):
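    """txprepare accepts several outputs in a single call."""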
amount = 10000000
l1 = node_factory.get_node(random_hsm=True)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = []
for i in range(9):
outputs.append({l1.rpc.newaddr()['bech32']: Millisatoshi(amount * 100)})
prep = l1.rpc.txprepare(outputs=outputs)
l1.rpc.txdiscard(prep['txid'])
def test_txprepare(node_factory, bitcoind, chainparams):
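    """Exercise txprepare/txdiscard: coin selection, 'all', explicit utxo
    sets and multiple outputs (with change)."""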
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare(outputs=[{addr: Millisatoshi(amount * 3 * 1000)}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# 4 inputs, 2 outputs (3 if we have a fee output).
assert len(decode['vin']) == 4
    assert len(decode['vout']) == (2 if not chainparams['feeoutput'] else 3)
# One output will be correct.
outnum = [i for i, o in enumerate(decode['vout']) if o['value'] == Decimal(amount * 3) / 10**8][0]
for i, o in enumerate(decode['vout']):
if i == outnum:
assert o['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert o['scriptPubKey']['addresses'] == [addr]
else:
assert o['scriptPubKey']['type'] in ['witness_v0_keyhash', 'fee']
# Now prepare one with no change.
prep2 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep2['unsigned_tx'])
assert decode['txid'] == prep2['txid']
    # 6 inputs, 1 output.
    assert len(decode['vin']) == 6
    assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)
# Some fees will be paid.
assert decode['vout'][0]['value'] < Decimal(amount * 6) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 6) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# If I cancel the first one, I can get those first 4 outputs.
discard = l1.rpc.txdiscard(prep['txid'])
assert discard['txid'] == prep['txid']
assert discard['unsigned_tx'] == prep['unsigned_tx']
prep3 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep3['unsigned_tx'])
assert decode['txid'] == prep3['txid']
    # 4 inputs, 1 output.
    assert len(decode['vin']) == 4
    assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 4) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 4) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# Cannot discard twice.
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
# Discard everything, we should now spend all inputs.
l1.rpc.txdiscard(prep2['txid'])
l1.rpc.txdiscard(prep3['txid'])
prep4 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep4['unsigned_tx'])
assert decode['txid'] == prep4['txid']
    # 10 inputs, 1 output.
    assert len(decode['vin']) == 10
    assert len(decode['vout']) == (1 if not chainparams['feeoutput'] else 2)
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 10) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 10) / 10**8 - Decimal(0.0003)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
l1.rpc.txdiscard(prep4['txid'])
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
prep5 = l1.rpc.txprepare([{addr:
Millisatoshi(amount * 3.5 * 1000)}], utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
# We should have a change output, so this is exact
    assert len(decode['vout']) == (3 if chainparams['feeoutput'] else 2)
assert decode['vout'][1]['value'] == Decimal(amount * 3.5) / 10**8
assert decode['vout'][1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][1]['scriptPubKey']['addresses'] == [addr]
    # Discard prep5 and get all funds again
l1.rpc.txdiscard(prep5['txid'])
with pytest.raises(RpcError, match=r'this destination wants all satoshi. The count of outputs can\'t be more than 1'):
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)},
{addr: 'all'}])
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 500 + 100000)},
{addr: Millisatoshi(amount * 3 * 500 - 100000)}])
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
    # 4 inputs, 3 outputs (including change).
    assert len(decode['vin']) == 4
    assert len(decode['vout']) == (4 if chainparams['feeoutput'] else 3)
# One output will be correct.
for i in range(3 + chainparams['feeoutput']):
if decode['vout'][i - 1]['value'] == Decimal('0.01500100'):
outnum1 = i - 1
elif decode['vout'][i - 1]['value'] == Decimal('0.01499900'):
outnum2 = i - 1
else:
changenum = i - 1
assert decode['vout'][outnum1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum1]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][outnum2]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum2]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][changenum]['scriptPubKey']['type'] == 'witness_v0_keyhash'
def test_txsend(node_factory, bitcoind, chainparams):
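    """txsend broadcasts a prepared transaction; the spent inputs disappear
    from listfunds and the change output shows up there."""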
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)}])
out = l1.rpc.txsend(prep['txid'])
# Cannot discard after send!
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
wait_for(lambda: prep['txid'] in bitcoind.rpc.getrawmempool())
# Signed tx should have same txid
decode = bitcoind.rpc.decoderawtransaction(out['tx'])
assert decode['txid'] == prep['txid']
bitcoind.generate_block(1)
# Change output should appear.
if decode['vout'][0]['value'] == Decimal(amount * 3) / 10**8:
changenum = 1
elif decode['vout'][1]['value'] == Decimal(amount * 3) / 10**8:
changenum = 0
else:
assert False
# Those spent outputs are gone, but change output has arrived.
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10 - len(decode['vin']) + 1)
# Change address should appear in listfunds()
assert decode['vout'][changenum]['scriptPubKey']['addresses'][0] in [f['address'] for f in l1.rpc.listfunds()['outputs']]
def test_txprepare_restart(node_factory, bitcoind, chainparams):
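    """Inputs reserved by an unreleased txprepare become available again
    after a clean restart and after an unclean one."""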
amount = 1000000
l1 = node_factory.get_node(may_fail=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# L1 will forget all about it.
l1.restart()
    # It rescans backwards in the blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# This will also work if we simply kill it.
l1.restart(clean=False)
    # It rescans backwards in the blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
# It should have logged this for each output.
for i in decode['vin']:
assert l1.daemon.is_in_log('wallet: reserved output {}/{} reset to available'.format(i['txid'], i['vout']))
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
@unittest.skipIf(TEST_NETWORK != 'regtest', "Fee outputs throw off our output matching logic")
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Tests annotations which are compiled only with experimental features")
def test_transaction_annotations(node_factory, bitcoind):
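    """Deposits and channel fundings are annotated in listtransactions."""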
l1, l2, l3 = node_factory.get_nodes(3)
l1.fundwallet(10**6)
# We should now have a transaction that gave us the funds in the
# transactions table...
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
out = outputs[0]
idx = out['output']
assert(idx in [0, 1] and out['value'] == 10**6)
# ... and it should have an annotation on the output reading 'deposit'
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 1)
tx = txs[0]
output = tx['outputs'][idx]
assert(output['type'] == 'deposit' and output['satoshis'] == '1000000000msat')
# ... and all other output should be change, and have no annotations
types = []
for i, o in enumerate(tx['outputs']):
if i == idx:
continue
if 'type' in o:
types.append(o['type'])
else:
types.append(None)
assert(set([None]) == set(types))
##########################################################################
# Let's now open a channel. The opener should get the funding transaction
# annotated as channel open and deposit.
l1.connect(l2)
fundingtx = l1.rpc.fundchannel(l2.info['id'], 10**5)
# We should have one output available, and it should be unconfirmed
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'unconfirmed')
# It should also match the funding txid:
assert(outputs[0]['txid'] == fundingtx['txid'])
# Confirm the channel and check that the output changed to confirmed
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l1, l2])
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
# We should have 2 transactions, the second one should be the funding tx
# (we are ordering by blockheight and txindex, so that order should be ok)
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 2 and txs[1]['hash'] == fundingtx['txid'])
# Check the annotated types
types = [o['type'] for o in txs[1]['outputs']]
changeidx = 0 if types[0] == 'deposit' else 1
fundidx = 1 - changeidx
assert(types[changeidx] == 'deposit' and types[fundidx] == 'channel_funding')
# And check the channel annotation on the funding output
peers = l1.rpc.listpeers()['peers']
assert(len(peers) == 1 and len(peers[0]['channels']) == 1)
scid = peers[0]['channels'][0]['short_channel_id']
assert(txs[1]['outputs'][fundidx]['channel'] == scid)
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsm_secret_encryption(node_factory):
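    """Encrypt the hsm_secret via --encrypted-hsm and check startup with the
    correct, a wrong, and a missing password."""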
l1 = node_factory.get_node(may_fail=True) # May fail when started without key
password = "<PASSWORD>"
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Test we can encrypt an already-existing and not encrypted hsm_secret
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
id = l1.rpc.getinfo()["id"]
l1.stop()
# Test we cannot start the same wallet without specifying --encrypted-hsm
l1.daemon.opts.pop("encrypted-hsm")
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
# Test we cannot restore the same wallet with another password
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password[2:].encode("utf-8"))
assert(l1.daemon.proc.wait() == 1)
assert(l1.daemon.is_in_log("Wrong password for encrypted hsm_secret."))
# Test we can restore the same wallet with the same password
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert id == l1.rpc.getinfo()["id"]
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsmtool_secret_decryption(node_factory):
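    """hsmtool can decrypt and re-encrypt the hsm_secret offline while
    lightningd keeps deriving the same node id."""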
l1 = node_factory.get_node()
password = "<PASSWORD>"
hsm_path = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "hsm_secret")
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Encrypt the master seed
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
node_id = l1.rpc.getinfo()["id"]
l1.stop()
    # We can't use a wrong password!
cmd_line = ["tools/hsmtool", "decrypt", hsm_path, "A wrong pass"]
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(cmd_line)
# Decrypt it with hsmtool
cmd_line[3] = password[:-1]
subprocess.check_call(cmd_line)
# Then test we can now start it without password
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# Test we can encrypt it offline
cmd_line[1] = "encrypt"
subprocess.check_call(cmd_line)
# Now we need to pass the encrypted-hsm startup option
l1.stop()
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
l1.daemon.opts.update({"encrypted-hsm": None})
master_fd, slave_fd = os.openpty()
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# And finally test that we can also decrypt if encrypted with hsmtool
cmd_line[1] = "decrypt"
subprocess.check_call(cmd_line)
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
# This test does a 'listtransactions' on a yet-unconfirmed channel.
def test_fundchannel_listtransaction(node_factory, bitcoind):
l1, l2 = node_factory.get_nodes(2)
l1.fundwallet(10**6)
l1.connect(l2)
txid = l1.rpc.fundchannel(l2.info['id'], 10**5)['txid']
    # The next call used to warn about SQL accessing a null column
    # and crashed the daemon by reading random or null memory.
txs = l1.rpc.listtransactions()['transactions']
tx = [t for t in txs if t['hash'] == txid][0]
assert tx['blockheight'] == 0
def test_withdraw_nlocktime(node_factory):
"""
Test that we don't set the nLockTime to 0 for withdrawal transactions.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**4)
addr = l1.rpc.newaddr()["bech32"]
tx = l1.rpc.withdraw(addr, 10**3)["tx"]
nlocktime = node_factory.bitcoind.rpc.decoderawtransaction(tx)["locktime"]
tip = node_factory.bitcoind.rpc.getblockcount()
assert nlocktime > 0 and nlocktime <= tip
@flaky
@unittest.skipIf(VALGRIND, "A big loop is used to check fuzz.")
def test_withdraw_nlocktime_fuzz(node_factory, bitcoind):
"""
Test that we eventually fuzz nLockTime for withdrawal transactions.
Marked flaky "just in case" as we fuzz from 0 to 100 with a 10%
probability.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**8)
for i in range(100):
addr = l1.rpc.newaddr()["bech32"]
withdraw = l1.rpc.withdraw(addr, 10**3)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.
format(withdraw["txid"]))
decoded = bitcoind.rpc.decoderawtransaction(withdraw["tx"])
tip = node_factory.bitcoind.rpc.getblockcount()
assert decoded["locktime"] > 0
if decoded["locktime"] < tip:
return
else:
raise Exception("No transaction with fuzzed nLockTime !")
from fixtures import * # noqa: F401,F403
from fixtures import TEST_NETWORK
from flaky import flaky # noqa: F401
from pyln.client import RpcError, Millisatoshi
from utils import (
only_one, wait_for, sync_blockheight, EXPERIMENTAL_FEATURES, COMPAT,
VALGRIND
)
import os
import pytest
import subprocess
import time
import unittest
@unittest.skipIf(TEST_NETWORK != 'regtest', "Test relies on a number of example addresses valid only in regtest")
def test_withdraw(node_factory, bitcoind):
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
l2 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Reach around into the db to check that outputs were added
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 10
waddr = l1.bitcoin.rpc.getnewaddress()
# Now attempt to withdraw some (making sure we collect multiple inputs)
with pytest.raises(RpcError):
l1.rpc.withdraw('not an address', amount)
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, 'not an amount')
with pytest.raises(RpcError):
l1.rpc.withdraw(waddr, -amount)
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, amount * 100)
out = l1.rpc.withdraw(waddr, 2 * amount)
# Make sure bitcoind received the withdrawal
unspent = l1.bitcoin.rpc.listunspent(0)
withdrawal = [u for u in unspent if u['txid'] == out['txid']]
assert(withdrawal[0]['amount'] == Decimal('0.02'))
l1.bitcoin.generate_block(1)
sync_blockheight(l1.bitcoin, [l1])
# Check that there are no unconfirmed outputs (change should be confirmed)
for o in l1.rpc.listfunds()['outputs']:
assert o['status'] == 'confirmed'
# Now make sure two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 2
# Now send some money to l2.
# lightningd uses P2SH-P2WPKH
waddr = l2.rpc.newaddr('bech32')['bech32']
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Make sure l2 received the withdrawal.
wait_for(lambda: len(l2.rpc.listfunds()['outputs']) == 1)
outputs = l2.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 2 * amount
# Now make sure an additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 4
# Simple test for withdrawal to P2WPKH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kygt080'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qw508d6qejxtdg4y5r3zarvary0c5xw7kxpjzsx', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1pw508d6qejxtdg4y5r3zarvary0c5xw7kdl9fad', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qw508d6qejxtdg4y5r3zarvary0c5xw7kxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 6
# Simple test for withdrawal to P2WSH
# Address from: https://bc-2.jp/tools/bech32demo/index.html
waddr = 'bcrt1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qzf4jry'
with pytest.raises(RpcError):
l1.rpc.withdraw('xx1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1prp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qsm03tq', 2 * amount)
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qxxxxxx', 2 * amount)
l1.rpc.withdraw(waddr, 2 * amount)
bitcoind.generate_block(1)
# Now make sure additional two of them were marked as spent
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=2')[0]['c'] == 8
# failure testing for invalid SegWit addresses, from BIP173
# HRP character out of range
with pytest.raises(RpcError):
l1.rpc.withdraw(' 1nwldj5', 2 * amount)
# overall max length exceeded
with pytest.raises(RpcError):
l1.rpc.withdraw('an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx', 2 * amount)
# No separator character
with pytest.raises(RpcError):
l1.rpc.withdraw('pzry9x0s0muk', 2 * amount)
# Empty HRP
with pytest.raises(RpcError):
l1.rpc.withdraw('1pzry9x0s0muk', 2 * amount)
# Invalid witness version
with pytest.raises(RpcError):
l1.rpc.withdraw('BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2', 2 * amount)
# Invalid program length for witness version 0 (per BIP141)
with pytest.raises(RpcError):
l1.rpc.withdraw('BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P', 2 * amount)
# Mixed case
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7', 2 * amount)
# Non-zero padding in 8-to-5 conversion
with pytest.raises(RpcError):
l1.rpc.withdraw('tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv', 2 * amount)
# Should have 6 outputs available.
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 6
# Test withdrawal to self.
l1.rpc.withdraw(l1.rpc.newaddr('bech32')['bech32'], 'all', minconf=0)
bitcoind.generate_block(1)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 1
l1.rpc.withdraw(waddr, 'all', minconf=0)
assert l1.db_query('SELECT COUNT(*) as c FROM outputs WHERE status=0')[0]['c'] == 0
# This should fail, can't even afford fee.
with pytest.raises(RpcError, match=r'Cannot afford transaction'):
l1.rpc.withdraw(waddr, 'all')
# Add some funds to withdraw
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
withdrawal = l1.rpc.withdraw(waddr, 2 * amount, utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(withdrawal['tx'])
assert decode['txid'] == withdrawal['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
def test_minconf_withdraw(node_factory, bitcoind):
"""Issue 2518: ensure that ridiculous confirmation levels don't overflow
The number of confirmations is used to compute a maximum height that is to
be accepted. If the current height is smaller than the number of
confirmations we wrap around and just select everything. The fix is to
clamp the maxheight parameter to a positive small number.
"""
amount = 1000000
# Don't get any funds from previous runs.
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
# Add some funds to withdraw later
for i in range(10):
l1.bitcoin.rpc.sendtoaddress(addr, amount / 10**8 + 0.01)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
with pytest.raises(RpcError):
l1.rpc.withdraw(destination=addr, satoshi=10000, feerate='normal', minconf=9999999)
def test_addfunds_from_block(node_factory, bitcoind):
"""Send funds to the daemon without telling it explicitly
"""
# Previous runs with same bitcoind can leave funds!
l1 = node_factory.get_node(random_hsm=True)
addr = l1.rpc.newaddr()['bech32']
bitcoind.rpc.sendtoaddress(addr, 0.1)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = l1.db_query('SELECT value FROM outputs WHERE status=0;')
assert only_one(outputs)['value'] == 10000000
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
# Send all our money to a P2WPKH address this time.
addr = l1.rpc.newaddr("bech32")['bech32']
l1.rpc.withdraw(addr, "all")
bitcoind.generate_block(1)
time.sleep(1)
# The address we detect must match what was paid to.
output = only_one(l1.rpc.listfunds()['outputs'])
assert output['address'] == addr
@unittest.skipIf(not COMPAT, "needs COMPAT=1")
def test_deprecated_txprepare(node_factory, bitcoind):
"""Test the deprecated old-style:
txprepare {destination} {satoshi} {feerate} {minconf}
"""
amount = 10**4
l1 = node_factory.get_node(options={'allow-deprecated-apis': True})
addr = l1.rpc.newaddr()['bech32']
for i in range(7):
l1.fundwallet(10**8)
bitcoind.generate_block(1)
sync_blockheight(bitcoind, [l1])
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 7)
# Array type
with pytest.raises(RpcError, match=r'.* should be an amount in satoshis or all, not .*'):
l1.rpc.call('txprepare', [addr, 'slow'])
with pytest.raises(RpcError, match=r'Need set \'satoshi\' field.'):
l1.rpc.call('txprepare', [addr])
with pytest.raises(RpcError, match=r'Could not parse destination address.*'):
l1.rpc.call('txprepare', [Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'slow', 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), 'normal'])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100), None, 1])
l1.rpc.call('txprepare', [addr, Millisatoshi(amount * 100)])
# Object type
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'destination': addr, 'feerate': 'slow'})
with pytest.raises(RpcError, match=r'Need set \'outputs\' field.'):
l1.rpc.call('txprepare', {'satoshi': Millisatoshi(amount * 100), 'feerate': '10perkw', 'minconf': 2})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw', 'minconf': 1})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100), 'feerate': '2000perkw'})
l1.rpc.call('txprepare', {'destination': addr, 'satoshi': Millisatoshi(amount * 100)})
def test_txprepare_multi(node_factory, bitcoind):
amount = 10000000
l1 = node_factory.get_node(random_hsm=True)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'], amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 1)
outputs = []
for i in range(9):
outputs.append({l1.rpc.newaddr()['bech32']: Millisatoshi(amount * 100)})
prep = l1.rpc.txprepare(outputs=outputs)
l1.rpc.txdiscard(prep['txid'])
def test_txprepare(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare(outputs=[{addr: Millisatoshi(amount * 3 * 1000)}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# 4 inputs, 2 outputs (3 if we have a fee output).
assert len(decode['vin']) == 4
assert len(decode['vout']) == 2 if not chainparams['feeoutput'] else 3
# One output will be correct.
outnum = [i for i, o in enumerate(decode['vout']) if o['value'] == Decimal(amount * 3) / 10**8][0]
for i, o in enumerate(decode['vout']):
if i == outnum:
assert o['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert o['scriptPubKey']['addresses'] == [addr]
else:
assert o['scriptPubKey']['type'] in ['witness_v0_keyhash', 'fee']
# Now prepare one with no change.
prep2 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep2['unsigned_tx'])
assert decode['txid'] == prep2['txid']
# 6 inputs, 1 outputs.
assert len(decode['vin']) == 6
assert len(decode['vout']) == 1 if not chainparams['feeoutput'] else 2
# Some fees will be paid.
assert decode['vout'][0]['value'] < Decimal(amount * 6) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 6) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# If I cancel the first one, I can get those first 4 outputs.
discard = l1.rpc.txdiscard(prep['txid'])
assert discard['txid'] == prep['txid']
assert discard['unsigned_tx'] == prep['unsigned_tx']
prep3 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep3['unsigned_tx'])
assert decode['txid'] == prep3['txid']
# 4 inputs, 1 outputs.
assert len(decode['vin']) == 4
assert len(decode['vout']) == 1 if not chainparams['feeoutput'] else 2
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 4) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 4) / 10**8 - Decimal(0.0002)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
# Cannot discard twice.
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
# Discard everything, we should now spend all inputs.
l1.rpc.txdiscard(prep2['txid'])
l1.rpc.txdiscard(prep3['txid'])
prep4 = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep4['unsigned_tx'])
assert decode['txid'] == prep4['txid']
# 10 inputs, 1 outputs.
assert len(decode['vin']) == 10
assert len(decode['vout']) == 1 if not chainparams['feeoutput'] else 2
# Some fees will be taken
assert decode['vout'][0]['value'] < Decimal(amount * 10) / 10**8
assert decode['vout'][0]['value'] > Decimal(amount * 10) / 10**8 - Decimal(0.0003)
assert decode['vout'][0]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][0]['scriptPubKey']['addresses'] == [addr]
l1.rpc.txdiscard(prep4['txid'])
# Try passing in a utxo set
utxos = [utxo["txid"] + ":" + str(utxo["output"]) for utxo in l1.rpc.listfunds()["outputs"]][:4]
prep5 = l1.rpc.txprepare([{addr:
Millisatoshi(amount * 3.5 * 1000)}], utxos=utxos)
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
# Check that correct utxos are included
assert len(decode['vin']) == 4
vins = ["{}:{}".format(v['txid'], v['vout']) for v in decode['vin']]
for utxo in utxos:
assert utxo in vins
# We should have a change output, so this is exact
assert len(decode['vout']) == 3 if chainparams['feeoutput'] else 2
assert decode['vout'][1]['value'] == Decimal(amount * 3.5) / 10**8
assert decode['vout'][1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][1]['scriptPubKey']['addresses'] == [addr]
# Discard prep4 and get all funds again
l1.rpc.txdiscard(prep5['txid'])
with pytest.raises(RpcError, match=r'this destination wants all satoshi. The count of outputs can\'t be more than 1'):
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)},
{addr: 'all'}])
prep5 = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 500 + 100000)},
{addr: Millisatoshi(amount * 3 * 500 - 100000)}])
decode = bitcoind.rpc.decoderawtransaction(prep5['unsigned_tx'])
assert decode['txid'] == prep5['txid']
# 4 inputs, 3 outputs(include change).
assert len(decode['vin']) == 4
assert len(decode['vout']) == 4 if chainparams['feeoutput'] else 3
# One output will be correct.
for i in range(3 + chainparams['feeoutput']):
if decode['vout'][i - 1]['value'] == Decimal('0.01500100'):
outnum1 = i - 1
elif decode['vout'][i - 1]['value'] == Decimal('0.01499900'):
outnum2 = i - 1
else:
changenum = i - 1
assert decode['vout'][outnum1]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum1]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][outnum2]['scriptPubKey']['type'] == 'witness_v0_keyhash'
assert decode['vout'][outnum2]['scriptPubKey']['addresses'] == [addr]
assert decode['vout'][changenum]['scriptPubKey']['type'] == 'witness_v0_keyhash'
def test_txsend(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(random_hsm=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10)
prep = l1.rpc.txprepare([{addr: Millisatoshi(amount * 3 * 1000)}])
out = l1.rpc.txsend(prep['txid'])
# Cannot discard after send!
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
wait_for(lambda: prep['txid'] in bitcoind.rpc.getrawmempool())
# Signed tx should have same txid
decode = bitcoind.rpc.decoderawtransaction(out['tx'])
assert decode['txid'] == prep['txid']
bitcoind.generate_block(1)
# Change output should appear.
if decode['vout'][0]['value'] == Decimal(amount * 3) / 10**8:
changenum = 1
elif decode['vout'][1]['value'] == Decimal(amount * 3) / 10**8:
changenum = 0
else:
assert False
# Those spent outputs are gone, but change output has arrived.
wait_for(lambda: len(l1.rpc.listfunds()['outputs']) == 10 - len(decode['vin']) + 1)
# Change address should appear in listfunds()
assert decode['vout'][changenum]['scriptPubKey']['addresses'][0] in [f['address'] for f in l1.rpc.listfunds()['outputs']]
def test_txprepare_restart(node_factory, bitcoind, chainparams):
amount = 1000000
l1 = node_factory.get_node(may_fail=True)
addr = chainparams['example_addr']
# Add some funds to withdraw later: both bech32 and p2sh
for i in range(5):
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr()['bech32'],
amount / 10**8)
bitcoind.rpc.sendtoaddress(l1.rpc.newaddr('p2sh-segwit')['p2sh-segwit'],
amount / 10**8)
bitcoind.generate_block(1)
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# L1 will forget all about it.
l1.restart()
# It goes backwards in blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
with pytest.raises(RpcError, match=r'not an unreleased txid'):
l1.rpc.txdiscard(prep['txid'])
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
# This will also work if we simply kill it.
l1.restart(clean=False)
# It goes backwards in blockchain just in case there was a reorg. Wait.
wait_for(lambda: [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed'] * 10)
# It should have logged this for each output.
for i in decode['vin']:
assert l1.daemon.is_in_log('wallet: reserved output {}/{} reset to available'.format(i['txid'], i['vout']))
prep = l1.rpc.txprepare([{addr: 'all'}])
decode = bitcoind.rpc.decoderawtransaction(prep['unsigned_tx'])
assert decode['txid'] == prep['txid']
# All 10 inputs
assert len(decode['vin']) == 10
@unittest.skipIf(TEST_NETWORK != 'regtest', "Fee outputs throw off our output matching logic")
@unittest.skipIf(not EXPERIMENTAL_FEATURES, "Tests annotations which are compiled only with experimental features")
def test_transaction_annotations(node_factory, bitcoind):
l1, l2, l3 = node_factory.get_nodes(3)
l1.fundwallet(10**6)
# We should now have a transaction that gave us the funds in the
# transactions table...
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
out = outputs[0]
idx = out['output']
assert(idx in [0, 1] and out['value'] == 10**6)
# ... and it should have an annotation on the output reading 'deposit'
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 1)
tx = txs[0]
output = tx['outputs'][idx]
assert(output['type'] == 'deposit' and output['satoshis'] == '1000000000msat')
# ... and all other output should be change, and have no annotations
types = []
for i, o in enumerate(tx['outputs']):
if i == idx:
continue
if 'type' in o:
types.append(o['type'])
else:
types.append(None)
assert(set([None]) == set(types))
##########################################################################
# Let's now open a channel. The opener should get the funding transaction
# annotated as channel open and deposit.
l1.connect(l2)
fundingtx = l1.rpc.fundchannel(l2.info['id'], 10**5)
# We should have one output available, and it should be unconfirmed
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'unconfirmed')
# It should also match the funding txid:
assert(outputs[0]['txid'] == fundingtx['txid'])
# Confirm the channel and check that the output changed to confirmed
bitcoind.generate_block(3)
sync_blockheight(bitcoind, [l1, l2])
outputs = l1.rpc.listfunds()['outputs']
assert(len(outputs) == 1 and outputs[0]['status'] == 'confirmed')
# We should have 2 transactions, the second one should be the funding tx
# (we are ordering by blockheight and txindex, so that order should be ok)
txs = l1.rpc.listtransactions()['transactions']
assert(len(txs) == 2 and txs[1]['hash'] == fundingtx['txid'])
# Check the annotated types
types = [o['type'] for o in txs[1]['outputs']]
changeidx = 0 if types[0] == 'deposit' else 1
fundidx = 1 - changeidx
assert(types[changeidx] == 'deposit' and types[fundidx] == 'channel_funding')
# And check the channel annotation on the funding output
peers = l1.rpc.listpeers()['peers']
assert(len(peers) == 1 and len(peers[0]['channels']) == 1)
scid = peers[0]['channels'][0]['short_channel_id']
assert(txs[1]['outputs'][fundidx]['channel'] == scid)
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsm_secret_encryption(node_factory):
l1 = node_factory.get_node(may_fail=True) # May fail when started without key
password = "<PASSWORD>"
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Test we can encrypt an already-existing and not encrypted hsm_secret
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
id = l1.rpc.getinfo()["id"]
l1.stop()
# Test we cannot start the same wallet without specifying --encrypted-hsm
l1.daemon.opts.pop("encrypted-hsm")
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
# Test we cannot restore the same wallet with another password
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password[2:].encode("utf-8"))
assert(l1.daemon.proc.wait() == 1)
assert(l1.daemon.is_in_log("Wrong password for encrypted hsm_secret."))
# Test we can restore the same wallet with the same password
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert id == l1.rpc.getinfo()["id"]
@unittest.skipIf(VALGRIND, "It does not play well with prompt and key derivation.")
def test_hsmtool_secret_decryption(node_factory):
l1 = node_factory.get_node()
password = "<PASSWORD>"
hsm_path = os.path.join(l1.daemon.lightning_dir, TEST_NETWORK, "hsm_secret")
# We need to simulate a terminal to use termios in `lightningd`.
master_fd, slave_fd = os.openpty()
# Encrypt the master seed
l1.stop()
l1.daemon.opts.update({"encrypted-hsm": None})
l1.daemon.start(stdin=slave_fd, wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
node_id = l1.rpc.getinfo()["id"]
l1.stop()
# We can't use a wrong password !
cmd_line = ["tools/hsmtool", "decrypt", hsm_path, "A wrong pass"]
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(cmd_line)
# Decrypt it with hsmtool
cmd_line[3] = password[:-1]
subprocess.check_call(cmd_line)
# Then test we can now start it without password
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# Test we can encrypt it offline
cmd_line[1] = "encrypt"
subprocess.check_call(cmd_line)
# Now we need to pass the encrypted-hsm startup option
l1.stop()
with pytest.raises(subprocess.CalledProcessError, match=r'returned non-zero exit status 1'):
subprocess.check_call(l1.daemon.cmd_line)
l1.daemon.opts.update({"encrypted-hsm": None})
master_fd, slave_fd = os.openpty()
l1.daemon.start(stdin=slave_fd, stderr=subprocess.STDOUT,
wait_for_initialized=False)
l1.daemon.wait_for_log(r'The hsm_secret is encrypted')
os.write(master_fd, password.encode("utf-8"))
l1.daemon.wait_for_log("Server started with public key")
assert node_id == l1.rpc.getinfo()["id"]
l1.stop()
# And finally test that we can also decrypt if encrypted with hsmtool
cmd_line[1] = "decrypt"
subprocess.check_call(cmd_line)
l1.daemon.opts.pop("encrypted-hsm")
l1.daemon.start(stdin=slave_fd, wait_for_initialized=True)
assert node_id == l1.rpc.getinfo()["id"]
# this test does a 'listtransactions' on a yet unconfirmed channel
def test_fundchannel_listtransaction(node_factory, bitcoind):
l1, l2 = node_factory.get_nodes(2)
l1.fundwallet(10**6)
l1.connect(l2)
txid = l1.rpc.fundchannel(l2.info['id'], 10**5)['txid']
# next call warned about SQL Accessing a null column
# and crashed the daemon for accessing random memory or null
txs = l1.rpc.listtransactions()['transactions']
tx = [t for t in txs if t['hash'] == txid][0]
assert tx['blockheight'] == 0
def test_withdraw_nlocktime(node_factory):
"""
Test that we don't set the nLockTime to 0 for withdrawal transactions.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**4)
addr = l1.rpc.newaddr()["bech32"]
tx = l1.rpc.withdraw(addr, 10**3)["tx"]
nlocktime = node_factory.bitcoind.rpc.decoderawtransaction(tx)["locktime"]
tip = node_factory.bitcoind.rpc.getblockcount()
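    # The wallet sets nLockTime to (roughly) the current tip as an
    # anti-fee-sniping measure, so it must be positive and not in the future.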
assert nlocktime > 0 and nlocktime <= tip
@flaky
@unittest.skipIf(VALGRIND, "A big loop is used to check fuzz.")
def test_withdraw_nlocktime_fuzz(node_factory, bitcoind):
"""
Test that we eventually fuzz nLockTime for withdrawal transactions.
Marked flaky "just in case" as we fuzz from 0 to 100 with a 10%
probability.
"""
l1 = node_factory.get_node(1)
l1.fundwallet(10**8)
for i in range(100):
addr = l1.rpc.newaddr()["bech32"]
withdraw = l1.rpc.withdraw(addr, 10**3)
bitcoind.generate_block(1)
l1.daemon.wait_for_log('Owning output .* txid {} CONFIRMED'.
format(withdraw["txid"]))
decoded = bitcoind.rpc.decoderawtransaction(withdraw["tx"])
tip = node_factory.bitcoind.rpc.getblockcount()
assert decoded["locktime"] > 0
if decoded["locktime"] < tip:
return
else:
raise Exception("No transaction with fuzzed nLockTime !")
microbepy/plot/mutation_plot.py | ScienceStacks/MicrobEPy | 1 | 416 | <filename>microbepy/plot/mutation_plot.py
"""Provides plots of mutations for Isolates and Lines."""
from microbepy.common import constants as cn
from microbepy.common.dataframe_sorter import DataframeSorter
from microbepy.common.isolate import Isolate
from microbepy.common import util
from microbepy.correlation import genome_correlation
from microbepy.data.model_data_provider import ModelDataProvider
from microbepy.data import util_data
from microbepy.plot.mutation_cofraction import MutationCofraction
from microbepy.plot.util_plot import PlotParms
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
COLORS = ['red', 'green', 'blue']
SPECIES = {cn.SPECIES_MIX_DVH: "DVH",
cn.SPECIES_MIX_MMP: "MMP",
None: "both"}
FONTSIZE_TITLE = 16
FONTSIZE_LABEL = 8
MAX_LINES = 9
MIN_FRACTION = 0.25
THRESHOLD_FRAC = 0.2
MAX_SIGLVL = 0.01
COLORBAR_MIN = 1.0
COLORBAR_MAX = 4.0
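# COLORBAR_MIN/COLORBAR_MAX bound the color scale used for the significance
# heatmaps, whose values are -log10(significance level).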
class MutationLinePlot(object):
"""
Plot mutations by occurrences within Lines.
"""
def __init__(self, mutation_column=cn.GGENE_ID, species=None,
is_plot=True):
"""
:param str mutation_column:
:param bool is_plot:
"""
self._mutation_column = mutation_column
self._is_plot = is_plot
self._species = species
self.cofraction = MutationCofraction(species=self._species,
mutation_column=mutation_column)
def plotTransfers(self,
parms=PlotParms(is_initialize=False),
is_unit_fraction = False,
is_cluster_mutations=True):
"""
Does a stacked bar plot of mutation frequency for all transfers.
:param bool is_unit_fraction: round fraction to 1
:param bool is_cluster_mutations: Group similar mutations together
:return pd.DataFrame: row=mutation, col=line + transfer, value is fraction
"""
permitted_mutations = self.cofraction.ordered_mutations
transfers = self.cofraction.transfers
num_transfers = len(transfers)
fig, axes = plt.subplots(nrows=num_transfers, ncols=1)
dfs = []
for idx, transfer in enumerate(transfers):
parms[cn.PLT_YTICKLABELS] = True
if self._species is None:
parms[cn.PLT_TITLE] = "%d" % transfer
else:
parms[cn.PLT_TITLE] = "%s, %d" % (self._species, transfer)
if idx == 0:
parms[cn.PLT_YLABEL] = True
else:
parms[cn.PLT_YLABEL] = False
if idx < num_transfers - 1:
parms[cn.PLT_LEGEND] = False
parms[cn.PLT_XLABEL] = False
parms[cn.PLT_XTICKLABELS] = False
else:
parms[cn.PLT_LEGEND] = True
parms[cn.PLT_XLABEL] = True
parms[cn.PLT_XTICKLABELS] = True
df = self.plotLine(transfer,
parms=parms, is_plot=False,
ax=axes[idx], permitted_mutations=permitted_mutations,
is_unit_fraction=is_unit_fraction)
df[cn.TRANSFER] = transfer
dfs.append(df)
if self._is_plot:
plt.show()
return pd.concat(dfs)
def plotLine(self, transfer,
parms=PlotParms(is_initialize=False),
is_unit_fraction=False,
is_plot=None, ax=None, permitted_mutations=None):
"""
Does a stacked bar plot of mutation frequency by line
with colors
    :param int transfer:
    :param PlotParms parms:
    :param Axis ax: axis to use in plot
    :param list-str permitted_mutations: mutations to plot and the order
        in which they appear; if None, alphabetical order is used
:param bool is_unit_fraction: round non-zero fraction to 1
:return pd.DataFrame: row=mutation, col=line, value is fraction
"""
if is_plot is None:
is_plot = self._is_plot
parms.setTrueIfAbsent(cn.PLT_XLABEL)
parms.setTrueIfAbsent(cn.PLT_XTICKLABELS)
#
df_plot = self.cofraction.makeLineDF(
permitted_mutations=permitted_mutations,
transfer=transfer)
if is_unit_fraction:
df_plot = df_plot.applymap(
lambda v: 1 if v> MIN_FRACTION else v)
# Do the plot
if not cn.PLT_FIGSIZE in parms:
parms[cn.PLT_FIGSIZE] = (12, 8)
if ax is None:
ax = df_plot.plot(kind='bar', stacked=True,
figsize=parms[cn.PLT_FIGSIZE], legend=None)
else:
df_plot.plot(kind='bar', stacked=True,
legend=None, ax=ax, figsize=parms[cn.PLT_FIGSIZE])
ax.set_xlabel("", fontsize=FONTSIZE_LABEL) # Eliminate implicit label
if parms.isFalse(cn.PLT_XTICKLABELS):
labels = ax.get_xticklabels()
new_labels = np.repeat("", len(labels))
ax.set_xticklabels(new_labels)
if parms.isFalse(cn.PLT_YTICKLABELS):
labels = ax.get_yticklabels()
new_labels = np.repeat("", len(labels))
ax.set_yticklabels(new_labels)
if cn.PLT_TITLE in parms:
title = parms[cn.PLT_TITLE]
else:
title = "%s Mutations" % SPECIES[self._species]
xpos = int(len(df_plot)*0.5)
ypos = MAX_LINES - 3
ax.text(xpos, ypos, title, fontsize=FONTSIZE_TITLE)
ax.set_ylim([0, MAX_LINES])
if parms.isTrue(cn.PLT_YLABEL):
if is_unit_fraction:
label = "No. Lines"
else:
label = "Fraction"
ax.set_ylabel(label , fontsize=FONTSIZE_LABEL)
if parms.isTrue(cn.PLT_XLABEL):
ax.set_xlabel(self._mutation_column, fontsize=FONTSIZE_LABEL)
if parms.isTrue(cn.PLT_LEGEND):
ax.legend(loc=(1,2))
#ax.legend()
if is_plot:
plt.show()
return df_plot
def _makeMutationSiglvlMatrix(self,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None, min_fraction=MIN_FRACTION):
"""
Creates a significance level matrix for mutations.
:param int transfer: transfer time for row mutations
:param int other_transfer: transfer time for column mutations
:param float min_fraction: minimum fractional occurrence of
a mutation within a line for it to be considered
:return pd.DataFrame: row index and columns are mutations
"""
def makeDF(transfer):
df_line = self.cofraction.makeLineDF(transfer=transfer)
df_binary = df_line.applymap(
lambda v: 0 if np.isnan(v) else v)
df_binary = df_line.applymap(
lambda v: 1.0 if v > min_fraction else 0)
return df_binary.transpose()
#
if other_transfer is None:
other_transfer = transfer
#
df_binary_rows = makeDF(transfer)
df_binary_columns = makeDF(other_transfer)
df_matrix = genome_correlation.makeSiglvlDF(df_binary_rows,
df_other=df_binary_columns)
return df_matrix
def _plotSiglvlDF(self, transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
max_siglvl=MAX_SIGLVL):
"""
    Constructs the dataframe used for the heatmap.
:param int transfer:
:param float max_siglvl:
:return pd.DataFrame: mutations, mutations,
values are -log10 significance level
"""
df_matrix = self._makeMutationSiglvlMatrix(transfer=transfer,
other_transfer=other_transfer)
sorter = DataframeSorter(df_matrix)
df_sort = sorter.orderBoth()
#
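    # Convert p-values to a -log10 scale and keep only entries at or above
    # the -log10(max_siglvl) cutoff; everything else becomes NaN.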
df_transformed = df_sort.applymap(lambda v: np.log10(v))
df_transformed = df_transformed.applymap(lambda v: -v)
ubound = -np.log10(max_siglvl)
df_plot = df_transformed.applymap(
lambda v: np.nan if v < ubound else v)
sorter = DataframeSorter(df_plot)
df_plot = sorter.deleteNanRowsAndColumns()
return df_plot
def plotCofractions(self, is_time_lag=False,
threshold_frac=THRESHOLD_FRAC,
is_difference_frac=False,
is_differenced=False,
is_compress=False,
parms=PlotParms(), **kwargs):
"""
    Does subplots of the fraction of lines in which mutations co-occur.
:param bool is_time_lag: construct time lag subplots
:param bool is_differenced: Computes the difference in
count fractions
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
def funcDF(transfer, other_transfer):
if is_differenced:
df = self.cofraction.makeCofractionDifferencedDF(
transfer=transfer, other_transfer=other_transfer,
threshold_frac=threshold_frac)
else:
df = self.cofraction.makeCofractionDF(transfer=transfer,
is_difference_frac=is_difference_frac,
other_transfer=other_transfer)
if is_compress:
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
return df
#
return self._plotTransfers(funcDF, is_time_lag,
parms=parms, heat_range=[0, 1.0], **kwargs)
def plotSiglvls(self, is_time_lag=False, max_siglvl=MAX_SIGLVL,
parms=PlotParms(), **kwargs):
"""
    Does subplots of mutation correlation significance levels.
:param bool is_time_lag: construct time lag subplots
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
def funcDF(transfer, other_transfer):
return self._plotSiglvlDF(transfer=transfer,
max_siglvl=max_siglvl,
other_transfer=other_transfer)
#
return self._plotTransfers(funcDF, is_time_lag,
parms=parms,
heat_range = [COLORBAR_MIN, COLORBAR_MAX],
**kwargs)
def _plotTransfers(self, funcDF, is_time_lag,
parms=PlotParms(), **kwargs):
"""
    Does subplots of mutations over transfers.
:param Function funcDF: has kwargs transfer, other_transfer;
returns a dataframe of mutations as columns and index;
values are used in the heatmap.
:param bool is_time_lag: construct time lag subplots
:param dict kwargs: non-transfer parameters passed to next level
:return dict: key is pair of transfers, value is data_frame
"""
NCOLS = 3
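    # plot_pos maps the k-th transfer pair to its subplot slot in the
    # nrows x 3 grid; the middle column (slots 2 and 5) is left free for the
    # centered colorbar.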
plot_pos = {1:1, 2:3, 3:4, 4:6}
NPLOTS = 6
transfers = self.cofraction.transfers
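    # With a time lag, pair each transfer with the following one; otherwise
    # pair each transfer with itself.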
if is_time_lag:
pairs = [p for p in zip(transfers[:-1], transfers[1:])]
else:
pairs = [p for p in zip(transfers[:-1], transfers[:-1])]
#
# Calculate the column order
df = funcDF(transfer=cn.TRANSFER_1000G,
other_transfer=cn.TRANSFER_1000G)
df = df.fillna(0)
# Set up for plots
nrows = 2 if (len(pairs) == 4) else 3
fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE])
result = {}
for idx, pair in enumerate(pairs):
idx += 1
ax = fig.add_subplot(nrows, NCOLS, plot_pos[idx])
if idx < len(pairs):
is_plot = False
else:
is_plot = True
if idx in [1, 2, 5]:
parms[cn.PLT_XAXISTICKTOP] = True
else:
parms[cn.PLT_XAXISTICKTOP] = False
if idx == 4:
parms[cn.PLT_COLORBAR] = True
else:
parms[cn.PLT_COLORBAR] = False
transfer = pair[0]
other_transfer = pair[1]
df = funcDF(transfer=transfer, other_transfer=other_transfer)
df = df.applymap(lambda v: np.nan if v == 0 else v)
self._plotTransferCompare(df,
transfer=transfer, other_transfer=other_transfer,
ordered_columns=self.cofraction.ordered_mutations,
is_center_colorbar=True,
fig=fig, ax=ax, parms=parms, is_plot=is_plot, **kwargs)
result[pair] = df
return result
def plotSiglvl(self, max_siglvl=MAX_SIGLVL,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
is_center_colorbar = True,
**kwargs):
"""
    Constructs a heatmap of the mutation co-occurrence significance
levels.
:param float max_siglvl: maximum significance level
:return pd.DataFrame: columns, rows are mutations
"""
df_plot = self._plotSiglvlDF(transfer=transfer,
other_transfer=other_transfer,
max_siglvl=max_siglvl)
self._plotTransferCompare(df_plot,
heat_range = [COLORBAR_MIN, COLORBAR_MAX],
        ordered_columns=self.cofraction.ordered_mutations,
transfer=transfer, other_transfer=other_transfer,
is_center_colorbar=is_center_colorbar,
**kwargs)
return df_plot
def plotCofraction(self,
threshold_frac=THRESHOLD_FRAC,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
is_difference_frac=False,
is_differenced=False,
is_center_colorbar=True,
is_compress=False,
parms=PlotParms(),
**kwargs):
"""
    Constructs a heatmap of the mutation co-occurrence fractions.
:param int transfer: Transfer for which plot is done
:param bool is_differenced: Computes the difference in
count fractions
:param bool is_compress: Eliminate rows/columns
with 0 values
:return pd.DataFrame: columns, rows are mutations
"""
if is_differenced:
df = self.cofraction.makeCofractionDifferencedDF(
threshold_frac=threshold_frac,
transfer=transfer, other_transfer=other_transfer,
**kwargs)
df = df.applymap(lambda v: np.nan
if np.abs(v) < threshold_frac else v)
else:
df = self.cofraction.makeCofractionDF(transfer=transfer,
is_difference_frac=is_difference_frac,
other_transfer=other_transfer, **kwargs)
df = df.applymap(lambda v: np.nan if v < threshold_frac else v)
if is_compress:
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
is_include_missing_mutations = False
else:
is_include_missing_mutations = True
ordered_columns = self.cofraction.ordered_mutations
self._plotTransferCompare(df,
heat_range=[0, 1.0],
ordered_columns=ordered_columns,
parms=parms,
transfer=transfer, other_transfer=other_transfer,
is_center_colorbar=is_center_colorbar,
is_include_missing_mutations=is_include_missing_mutations,
**kwargs)
return df
def _plotTransferCompare(self,
df_plot,
heat_range,
ordered_columns=None,
is_center_colorbar=True,
transfer=cn.TRANSFER_DEFAULT,
other_transfer=None,
ax=None,
fig=None,
is_include_missing_mutations=True,
parms=PlotParms(),
is_plot=None):
"""
Constructs a heatmap comparing values for mutations from two transfers.
:param pd.DataFrame df_plot: index and columns are mutations;
values are plotted on the heatmap
:param list-str ordered_columns: order in which columns appear
:param bool is_center_colorbar: center the colorbar in the plot
    :param (float, float) heat_range: value range for the heatmap color scale
:param int transfer:
:param int other_transfer: Allow comparisons across time
:param Matplotlib.Axes ax:
:param PlotParms parms: Parameters for the plot
:param bool is_plot: Overrides constructor plotting directive
:param bool is_include_missing_mutations:
"""
def makeLabel(transfer, column, is_include_column=False):
if is_include_column:
label = "%d-%s" % (transfer, column)
else:
label = "%d" % transfer
return label
def setValue(a_dict, key, default):
if not key in a_dict.keys():
a_dict[key] = default
#
if is_plot is None:
is_plot = self._is_plot
elif not self._is_plot:
is_plot = self._is_plot
if ordered_columns is None:
ordered_columns = list(set(df_plot.columns.tolist()).union(
df_plot.index))
# Do the plot
if not cn.PLT_COLORBAR in parms:
parms[cn.PLT_COLORBAR] = True
if other_transfer is None:
other_transfer = transfer
if ax is None:
if fig is None:
fig = plt.figure(figsize=parms[cn.PLT_FIGSIZE])
ax = fig.add_subplot(1, 1, 1)
# Order the columns
if is_include_missing_mutations:
columns = df_plot.columns.tolist()
missing_columns = set(ordered_columns).difference(columns)
extended_ordered_columns = list(ordered_columns)
extended_ordered_columns.extend(
set(columns).difference(ordered_columns))
for col in missing_columns:
df_plot[col] = np.nan
df_plot.loc[col, :] = np.nan
df_plot = df_plot.reindex(extended_ordered_columns)
df_plot = df_plot[extended_ordered_columns]
rows = df_plot.columns.tolist()
columns = df_plot.columns.tolist()
else:
extended_ordered_columns = ordered_columns
rows = df_plot.index.tolist()
columns = df_plot.columns.tolist()
mutations = df_plot.columns.tolist()
# Set up plot information
parms[cn.PLT_XLABEL] = ""
setValue(parms, cn.PLT_COLORBAR, True)
xpos = 1.05*len(columns)
ypos = -0.05*len(rows)
parms[cn.PLT_XLABEL] = ""
xlabel = makeLabel(other_transfer, self._mutation_column)
parms[cn.PLT_YLABEL] = makeLabel(
transfer, self._mutation_column)
ax.text(xpos, ypos, xlabel, fontsize=parms.fontsize_label)
#
# Construct the plot
plot = ax.pcolor(df_plot, cmap='jet', vmin=heat_range[0],
vmax=heat_range[1])
if parms.isTrue(cn.PLT_COLORBAR):
if is_center_colorbar:
# Colorbar positions: left, bottom, width, height
cbaxes = fig.add_axes([.45, 0.2, 0.01, 0.5])
cb = fig.colorbar(plot, cax = cbaxes, cmap='jet')
cb.ax.tick_params(labelsize=parms.fontsize_label)
else:
cb = fig.colorbar(plot, cmap='jet')
cb.ax.tick_params(labelsize=parms.fontsize_label)
row_labels = df_plot.columns.tolist()
col_labels = df_plot.index.tolist()
if parms.isTrue(cn.PLT_XAXISTICKTOP):
ax.xaxis.tick_top()
ax.set_xticks(np.arange(0.5, len(row_labels)))
ax.set_xticklabels(row_labels, rotation=90,
fontsize=parms.fontsize_label)
ax.set_yticks(np.arange(0.5, len(col_labels)))
ax.set_yticklabels(col_labels,
fontsize=parms.fontsize_label)
#parms[cn.PLT_YLABEL] = ""
parms.do(is_plot=False)
if is_plot:
parms[cn.PLT_YLABEL] = ""
parms.do(is_plot=False)
ylabel = makeLabel(transfer, self._mutation_column)
xpos = -3
ypos = 0.5*len(rows)
ypos = -1
ax.set_ylabel(ylabel, fontsize=parms.fontsize_label,
x=xpos, y=ypos)
#plt.show()
parms.do(is_plot=is_plot)
else:
parms.do(is_plot=is_plot)
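# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a configured microbepy database so that MutationCofraction can
# load the mutation data:
#
#   plotter = MutationLinePlot(mutation_column=cn.GGENE_ID,
#                              species=cn.SPECIES_MIX_DVH)
#   df_line = plotter.plotLine(cn.TRANSFER_DEFAULT)
#   df_frac = plotter.plotCofraction(transfer=cn.TRANSFER_DEFAULT,
#                                    is_compress=True)
# ---------------------------------------------------------------------------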
semantic-segmentation/deeplabv3plus/dataset_utils.py | shikisawamura/nnabla-examples | 1 | 417 | <filename>semantic-segmentation/deeplabv3plus/dataset_utils.py
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
from scipy.misc import imread
from args import get_args
import matplotlib.pyplot as plt
def get_color():
# RGB format
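    # Color palette for the 21 PASCAL VOC classes, with the void/border color
    # ([224, 224, 192]) as the last entry.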
return np.array([[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128], [120, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128], [224, 224, 192]])
def encode_label(label):
'''
    Convert pixel values to the corresponding class numbers. Assumes the input
    label is 3-dimensional (h, w, c) and in RGB format, as read by scipy.misc.imread.
'''
h, w, c = label.shape
new_label = np.zeros((h, w, 1), dtype=np.int32)
cls_to_clr_map = get_color()
for i in range(cls_to_clr_map.shape[0]):
#new_label[(label == cls_to_clr_map[i])[:,:,0]] = i
#new_label[np.argwhere((label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))]=i
if i == 21:
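            # Map the void/border color to 255, the label value that is
            # conventionally ignored during training.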
new_label[np.where(
(label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = 255
else:
new_label[np.where(
(label.astype(np.int32) == cls_to_clr_map[i]).all(axis=2))] = i
return new_label
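# Illustrative inverse mapping (not part of the original script), useful for
# visually checking an encoded label; assumes the same palette as get_color():
#
#   def decode_label(encoded):
#       cmap = get_color()
#       h, w, _ = encoded.shape
#       out = np.zeros((h, w, 3), dtype=np.uint8)
#       for i in range(cmap.shape[0]):
#           value = 255 if i == 21 else i
#           out[encoded[:, :, 0] == value] = cmap[i]
#       return out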
# Generates train_image.txt/train_label.txt and val_image.txt/val_label.txt
# listing matching image and label paths.
def generate_path_files(data_dir, train_file, val_file):
ti = open('train_image.txt', 'w')
tl = open('train_label.txt', 'w')
vi = open('val_image.txt', 'w')
vl = open('val_label.txt', 'w')
rootdir = data_dir
train_text_file = open(train_file, "r")
lines = [line[:-1] for line in train_text_file]
for line in lines:
if os.path.exists(data_dir+'JPEGImages/'+line+'.jpg'):
ti.write(data_dir+'JPEGImages/'+line+'.jpg' + '\n')
assert (os.path.isfile(data_dir+'SegmentationClass/encoded/'+line +
'.npy')), "No matching label file for image : " + line + '.jpg'
tl.write(data_dir+'SegmentationClass/encoded/'+line + '.npy' + '\n')
val_text_file = open(val_file, "r")
lines = [line[:-1] for line in val_text_file]
for line in lines:
if os.path.exists(data_dir+'JPEGImages/'+line+'.jpg'):
vi.write(data_dir+'JPEGImages/'+line+'.jpg' + '\n')
assert (os.path.isfile(data_dir+'SegmentationClass/encoded/'+line +
'.npy')), "No matching label file for image : " + line + '.jpg'
vl.write(data_dir+'SegmentationClass/encoded/'+line + '.npy' + '\n')
ti.close()
tl.close()
vi.close()
vl.close()
def main():
'''
Arguments:
train-file = txt file containing randomly selected image filenames to be taken as training set.
val-file = txt file containing randomly selected image filenames to be taken as validation set.
data-dir = dataset directory
Usage: python dataset_utils.py --train-file="" --val-file="" --data_dir=""
'''
args = get_args()
data_dir = args.data_dir
if not os.path.exists(data_dir+'SegmentationClass/' + 'encoded/'):
os.makedirs(data_dir+'SegmentationClass/' + 'encoded/')
for filename in os.listdir(data_dir+'SegmentationClass/'):
if os.path.isdir(data_dir+'SegmentationClass/' + filename):
continue
label = imread(data_dir+'SegmentationClass/' +
filename).astype('float32')
label = encode_label(label)
np.save(data_dir+'SegmentationClass/' + 'encoded/' +
filename.split('.')[0] + '.npy', label)
generate_path_files(args.data_dir, args.train_file, args.val_file)
if __name__ == '__main__':
main()
train.py | shamilcm/fairseq-py | 1 | 418 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import collections
import os
import torch
import math
from fairseq import bleu, data, options, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.multiprocessing_trainer import MultiprocessingTrainer
from fairseq.progress_bar import progress_bar
from fairseq.sequence_generator import SequenceGenerator
def main():
parser = options.get_parser('Trainer')
dataset_args = options.add_dataset_args(parser)
dataset_args.add_argument('--max-tokens', default=0, type=int, metavar='N',
help='maximum number of tokens in a batch')
dataset_args.add_argument('--batch-size', default=32, type=int, metavar='N',
help='batch size')
dataset_args.add_argument('--test-batch-size', default=32, type=int, metavar='N',
help='batch size for test set')
dataset_args.add_argument('--valid-batch-size', default=32, type=int, metavar='N',
help='batch size for validation set')
dataset_args.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
dataset_args.add_argument('--valid-subset', default='valid', metavar='SPLIT',
                              help='comma separated list of data subsets '
                                   'to use for validation (train, valid, valid1, test, test1)')
dataset_args.add_argument('--test-subset', default='test', metavar='SPLIT',
                              help='comma separated list of data subsets '
'to use for testing (train, valid, test)')
dataset_args.add_argument('--valid-script', nargs='+', metavar='PATH', help='path to external validation script (optional).')
options.add_optimization_args(parser)
options.add_checkpoint_args(parser)
options.add_model_args(parser)
args = utils.parse_args_and_arch(parser)
print(args)
if args.no_progress_bar:
progress_bar.enabled = False
progress_bar.print_interval = args.log_interval
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
torch.manual_seed(args.seed)
    # Treat max_tokens == 0 as unlimited (same as setting it to None)
if args.max_tokens == 0:
args.max_tokens = None
# Load dataset
dataset = data.load_with_check(args.data, args.source_lang, args.target_lang)
if args.source_lang is None or args.target_lang is None:
# record inferred languages in args, so that it's saved in checkpoints
args.source_lang, args.target_lang = dataset.src, dataset.dst
print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))
print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))
for split in dataset.splits:
print('| {} {} {} examples'.format(args.data, split, len(dataset.splits[split])))
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
num_gpus = torch.cuda.device_count()
print('| using {} GPUs (with max tokens per GPU = {})'.format(num_gpus, args.max_tokens))
# Build model
print('| model {}'.format(args.arch))
model = utils.build_model(args, dataset)
criterion = utils.build_criterion(args, dataset)
# Start multiprocessing
trainer = MultiprocessingTrainer(args, model)
# Load the latest checkpoint if one is available
epoch, batch_offset = trainer.load_checkpoint(os.path.join(args.save_dir, args.restore_file))
# Train until the learning rate gets too small
val_loss = None
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch <= max_epoch:
# train for one epoch
train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus)
# evaluate on validate set
for k, subset in enumerate(args.valid_subset.split(',')):
val_loss = validate(args, epoch, trainer, criterion, dataset, subset, num_gpus)
if k == 0:
if not args.no_save:
# save checkpoint
trainer.save_checkpoint(args, epoch, 0, val_loss, validation_script=args.valid_script)
# only use first validation loss to update the learning schedule
lr = trainer.lr_step(val_loss, epoch)
epoch += 1
batch_offset = 0
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
# Generate on test set and compute BLEU score
for beam in [1, 5, 10, 20]:
for subset in args.test_subset.split(','):
scorer = score_test(args, trainer.get_model(), dataset, subset, beam,
cuda_device=(0 if num_gpus > 0 else None))
print('| Test on {} with beam={}: {}'.format(subset, beam, scorer.result_string()))
# Stop multiprocessing
trainer.stop()
def train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus):
"""Train the model for one epoch."""
itr = dataset.dataloader(args.train_subset, batch_size=args.batch_size,
test_batch_size=args.test_batch_size,
valid_batch_size=args.valid_batch_size,
num_workers=args.workers,
max_tokens=args.max_tokens, seed=args.seed, epoch=epoch,
max_positions=args.max_positions,
sample_without_replacement=args.sample_without_replacement)
loss_meter = AverageMeter()
bsz_meter = AverageMeter() # sentences per batch
wpb_meter = AverageMeter() # words per batch
wps_meter = TimeMeter() # words per second
clip_meter = AverageMeter() # % of updates clipped
gnorm_meter = AverageMeter() # gradient norm
desc = '| epoch {:03d}'.format(epoch)
lr = trainer.get_lr()
with progress_bar(itr, desc, leave=False) as t:
for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset):
loss, grad_norm = trainer.train_step(sample, criterion)
ntokens = sum(s['ntokens'] for s in sample)
src_size = sum(s['src_tokens'].size(0) for s in sample)
loss_meter.update(loss, ntokens)
bsz_meter.update(src_size)
wpb_meter.update(ntokens)
wps_meter.update(ntokens)
clip_meter.update(1 if grad_norm > args.clip_norm else 0)
gnorm_meter.update(grad_norm)
t.set_postfix(collections.OrderedDict([
('loss', '{:.2f} ({:.2f})'.format(loss, loss_meter.avg)),
('wps', '{:5d}'.format(round(wps_meter.avg))),
('wpb', '{:5d}'.format(round(wpb_meter.avg))),
('bsz', '{:5d}'.format(round(bsz_meter.avg))),
('lr', lr),
('clip', '{:3.0f}%'.format(clip_meter.avg * 100)),
('gnorm', '{:.4f}'.format(gnorm_meter.avg)),
]))
if i == 0:
# ignore the first mini-batch in words-per-second calculation
wps_meter.reset()
if args.save_interval > 0 and (i + 1) % args.save_interval == 0:
trainer.save_checkpoint(args, epoch, i + 1)
fmt = desc + ' | train loss {:2.2f} | train ppl {:3.2f}'
fmt += ' | s/checkpoint {:7d} | words/s {:6d} | words/batch {:6d}'
fmt += ' | bsz {:5d} | lr {:0.6f} | clip {:3.0f}% | gnorm {:.4f}'
t.write(fmt.format(loss_meter.avg, math.pow(2, loss_meter.avg),
round(wps_meter.elapsed_time),
round(wps_meter.avg),
round(wpb_meter.avg),
round(bsz_meter.avg),
lr, clip_meter.avg * 100,
gnorm_meter.avg))
def validate(args, epoch, trainer, criterion, dataset, subset, ngpus):
"""Evaluate the model on the validation set and return the average loss."""
itr = dataset.dataloader(subset, batch_size=None,
max_tokens=args.max_tokens,
max_positions=args.max_positions)
loss_meter = AverageMeter()
desc = '| epoch {:03d} | valid on \'{}\' subset'.format(epoch, subset)
with progress_bar(itr, desc, leave=False) as t:
for _, sample in data.skip_group_enumerator(t, ngpus):
ntokens = sum(s['ntokens'] for s in sample)
loss = trainer.valid_step(sample, criterion)
loss_meter.update(loss, ntokens)
t.set_postfix(loss='{:.2f}'.format(loss_meter.avg))
val_loss = loss_meter.avg
t.write(desc + ' | valid loss {:2.2f} | valid ppl {:3.2f}'
.format(val_loss, math.pow(2, val_loss)))
    # return the validation loss (used by main() to update the learning rate)
return val_loss
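# A note on the perplexity figures reported by train() and validate() above
# (an observation about the existing code, not a change to it): both print
# math.pow(2, avg_loss), which implicitly treats the averaged loss as a base-2
# cross-entropy, so an average loss of 2.0 corresponds to a perplexity of 4.0
# and an average loss of 3.0 to a perplexity of 8.0.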
def score_test(args, model, dataset, subset, beam, cuda_device):
"""Evaluate the model on the test set and return the BLEU scorer."""
translator = SequenceGenerator([model], dataset.dst_dict, beam_size=beam)
if torch.cuda.is_available():
translator.cuda()
scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk())
itr = dataset.dataloader(subset, batch_size=4, max_positions=args.max_positions)
for _, _, ref, hypos in translator.generate_batched_itr(itr, cuda_device=cuda_device):
scorer.add(ref.int().cpu(), hypos[0]['tokens'].int().cpu())
return scorer
if __name__ == '__main__':
main()
| # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import collections
import os
import torch
import math
from fairseq import bleu, data, options, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.multiprocessing_trainer import MultiprocessingTrainer
from fairseq.progress_bar import progress_bar
from fairseq.sequence_generator import SequenceGenerator
def main():
parser = options.get_parser('Trainer')
dataset_args = options.add_dataset_args(parser)
dataset_args.add_argument('--max-tokens', default=0, type=int, metavar='N',
help='maximum number of tokens in a batch')
dataset_args.add_argument('--batch-size', default=32, type=int, metavar='N',
help='batch size')
dataset_args.add_argument('--test-batch-size', default=32, type=int, metavar='N',
help='batch size for test set')
dataset_args.add_argument('--valid-batch-size', default=32, type=int, metavar='N',
help='batch size for validation set')
dataset_args.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
dataset_args.add_argument('--valid-subset', default='valid', metavar='SPLIT',
                              help='comma separated list of data subsets '
                                   'to use for validation (train, valid, valid1, test, test1)')
dataset_args.add_argument('--test-subset', default='test', metavar='SPLIT',
                              help='comma separated list of data subsets '
'to use for testing (train, valid, test)')
dataset_args.add_argument('--valid-script', nargs='+', metavar='PATH', help='path to external validation script (optional).')
options.add_optimization_args(parser)
options.add_checkpoint_args(parser)
options.add_model_args(parser)
args = utils.parse_args_and_arch(parser)
print(args)
if args.no_progress_bar:
progress_bar.enabled = False
progress_bar.print_interval = args.log_interval
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
torch.manual_seed(args.seed)
    # Setting args.max_tokens to 0 means no limit (same as setting it to None)
if args.max_tokens == 0:
args.max_tokens = None
# Load dataset
dataset = data.load_with_check(args.data, args.source_lang, args.target_lang)
if args.source_lang is None or args.target_lang is None:
# record inferred languages in args, so that it's saved in checkpoints
args.source_lang, args.target_lang = dataset.src, dataset.dst
print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))
print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))
for split in dataset.splits:
print('| {} {} {} examples'.format(args.data, split, len(dataset.splits[split])))
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
num_gpus = torch.cuda.device_count()
print('| using {} GPUs (with max tokens per GPU = {})'.format(num_gpus, args.max_tokens))
# Build model
print('| model {}'.format(args.arch))
model = utils.build_model(args, dataset)
criterion = utils.build_criterion(args, dataset)
# Start multiprocessing
trainer = MultiprocessingTrainer(args, model)
# Load the latest checkpoint if one is available
epoch, batch_offset = trainer.load_checkpoint(os.path.join(args.save_dir, args.restore_file))
# Train until the learning rate gets too small
val_loss = None
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch <= max_epoch:
# train for one epoch
train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus)
        # evaluate on the validation set
for k, subset in enumerate(args.valid_subset.split(',')):
val_loss = validate(args, epoch, trainer, criterion, dataset, subset, num_gpus)
if k == 0:
if not args.no_save:
# save checkpoint
trainer.save_checkpoint(args, epoch, 0, val_loss, validation_script=args.valid_script)
# only use first validation loss to update the learning schedule
lr = trainer.lr_step(val_loss, epoch)
epoch += 1
batch_offset = 0
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
# Generate on test set and compute BLEU score
for beam in [1, 5, 10, 20]:
for subset in args.test_subset.split(','):
scorer = score_test(args, trainer.get_model(), dataset, subset, beam,
cuda_device=(0 if num_gpus > 0 else None))
print('| Test on {} with beam={}: {}'.format(subset, beam, scorer.result_string()))
# Stop multiprocessing
trainer.stop()
def train(args, epoch, batch_offset, trainer, criterion, dataset, num_gpus):
"""Train the model for one epoch."""
itr = dataset.dataloader(args.train_subset, batch_size=args.batch_size,
test_batch_size=args.test_batch_size,
valid_batch_size=args.valid_batch_size,
num_workers=args.workers,
max_tokens=args.max_tokens, seed=args.seed, epoch=epoch,
max_positions=args.max_positions,
sample_without_replacement=args.sample_without_replacement)
loss_meter = AverageMeter()
bsz_meter = AverageMeter() # sentences per batch
wpb_meter = AverageMeter() # words per batch
wps_meter = TimeMeter() # words per second
clip_meter = AverageMeter() # % of updates clipped
gnorm_meter = AverageMeter() # gradient norm
desc = '| epoch {:03d}'.format(epoch)
lr = trainer.get_lr()
with progress_bar(itr, desc, leave=False) as t:
for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset):
loss, grad_norm = trainer.train_step(sample, criterion)
ntokens = sum(s['ntokens'] for s in sample)
src_size = sum(s['src_tokens'].size(0) for s in sample)
loss_meter.update(loss, ntokens)
bsz_meter.update(src_size)
wpb_meter.update(ntokens)
wps_meter.update(ntokens)
clip_meter.update(1 if grad_norm > args.clip_norm else 0)
gnorm_meter.update(grad_norm)
t.set_postfix(collections.OrderedDict([
('loss', '{:.2f} ({:.2f})'.format(loss, loss_meter.avg)),
('wps', '{:5d}'.format(round(wps_meter.avg))),
('wpb', '{:5d}'.format(round(wpb_meter.avg))),
('bsz', '{:5d}'.format(round(bsz_meter.avg))),
('lr', lr),
('clip', '{:3.0f}%'.format(clip_meter.avg * 100)),
('gnorm', '{:.4f}'.format(gnorm_meter.avg)),
]))
if i == 0:
# ignore the first mini-batch in words-per-second calculation
wps_meter.reset()
if args.save_interval > 0 and (i + 1) % args.save_interval == 0:
trainer.save_checkpoint(args, epoch, i + 1)
fmt = desc + ' | train loss {:2.2f} | train ppl {:3.2f}'
fmt += ' | s/checkpoint {:7d} | words/s {:6d} | words/batch {:6d}'
fmt += ' | bsz {:5d} | lr {:0.6f} | clip {:3.0f}% | gnorm {:.4f}'
t.write(fmt.format(loss_meter.avg, math.pow(2, loss_meter.avg),
round(wps_meter.elapsed_time),
round(wps_meter.avg),
round(wpb_meter.avg),
round(bsz_meter.avg),
lr, clip_meter.avg * 100,
gnorm_meter.avg))
def validate(args, epoch, trainer, criterion, dataset, subset, ngpus):
"""Evaluate the model on the validation set and return the average loss."""
itr = dataset.dataloader(subset, batch_size=None,
max_tokens=args.max_tokens,
max_positions=args.max_positions)
loss_meter = AverageMeter()
desc = '| epoch {:03d} | valid on \'{}\' subset'.format(epoch, subset)
with progress_bar(itr, desc, leave=False) as t:
for _, sample in data.skip_group_enumerator(t, ngpus):
ntokens = sum(s['ntokens'] for s in sample)
loss = trainer.valid_step(sample, criterion)
loss_meter.update(loss, ntokens)
t.set_postfix(loss='{:.2f}'.format(loss_meter.avg))
val_loss = loss_meter.avg
t.write(desc + ' | valid loss {:2.2f} | valid ppl {:3.2f}'
.format(val_loss, math.pow(2, val_loss)))
    # return the validation loss (used by main() to update the learning rate)
return val_loss
def score_test(args, model, dataset, subset, beam, cuda_device):
"""Evaluate the model on the test set and return the BLEU scorer."""
translator = SequenceGenerator([model], dataset.dst_dict, beam_size=beam)
if torch.cuda.is_available():
translator.cuda()
scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk())
itr = dataset.dataloader(subset, batch_size=4, max_positions=args.max_positions)
for _, _, ref, hypos in translator.generate_batched_itr(itr, cuda_device=cuda_device):
scorer.add(ref.int().cpu(), hypos[0]['tokens'].int().cpu())
return scorer
if __name__ == '__main__':
main()
| en | 0.818954 | # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. # # Setting args.max_tokens to infinity(same as setting to None) # Load dataset # record inferred languages in args, so that it's saved in checkpoints # Build model # Start multiprocessing # Load the latest checkpoint if one is available # Train until the learning rate gets too small # train for one epoch # evaluate on validate set # save checkpoint # only use first validation loss to update the learning schedule # Generate on test set and compute BLEU score # Stop multiprocessing Train the model for one epoch. # sentences per batch # words per batch # words per second # % of updates clipped # gradient norm # ignore the first mini-batch in words-per-second calculation Evaluate the model on the validation set and return the average loss. # update and return the learning rate Evaluate the model on the test set and return the BLEU scorer. | 1.979635 | 2 |
storm_control/sc_library/log_timing.py | jeffmoffitt/storm-control | 0 | 419 | <gh_stars>0
#!/usr/bin/env python
"""
This parses a log file series (i.e. log, log.1, log.2, etc..) and
outputs timing and call frequency information for HAL messages.
Hazen 5/18
"""
from datetime import datetime
import os
pattern = '%Y-%m-%d %H:%M:%S,%f'
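# The parser below expects log lines of roughly the following shape. This is a
# reconstructed, hypothetical example based on the parsing code in logTiming(),
# not an excerpt from a real HAL log:
#
#   2018-05-01 12:00:00,123:hal4000:INFO:queued,42,film,start film
#   2018-05-01 12:00:00,456:hal4000:INFO:sent,42
#   2018-05-01 12:00:01,789:hal4000:INFO:processed,42
#
# i.e. a timestamp matching 'pattern', the ':hal4000:INFO:' marker, and a comma
# separated command whose first field names the event. For the three lines
# above the message would record a queued time of 0.333 s (queued -> sent) and
# a processing time of 1.333 s (sent -> processed).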
class Message(object):
"""
Storage for the timing of a single message.
"""
def __init__(self, m_type = None, source = None, time = None, zero_time = None, **kwds):
super().__init__(**kwds)
self.created_time = None
self.m_type = m_type
self.n_workers = 0
self.processing_time = None
self.queued_time = None
self.source = source
self.temp = self.parseTime(time)
self.created(zero_time)
def created(self, time):
t_time = self.parseTime(time)
self.created_time = (self.temp - t_time).total_seconds()
def getCreatedTime(self):
"""
        Returns the time when the message was created, relative to the first
        time in the log file, in seconds.
"""
return self.created_time
def getNWorkers(self):
"""
Return the number of workers (QRunnables) that were employed
to process this message.
"""
return self.n_workers
def getProcessingTime(self):
"""
Return time to process in seconds.
"""
return self.processing_time
def getQueuedTime(self):
"""
Return time queued in seconds.
"""
return self.queued_time
def getSource(self):
"""
Returns the source of a message.
"""
return self.source
def getType(self):
"""
Return the message type.
"""
return self.m_type
def incNWorkers(self):
self.n_workers += 1
def isComplete(self):
"""
Returns true if we have all the timing data for this message.
"""
        return self.processing_time is not None
def parseTime(self, time):
return datetime.strptime(time, pattern)
def processed(self, time):
t_time = self.parseTime(time)
self.processing_time = (t_time - self.temp).total_seconds()
def sent(self, time):
t_time = self.parseTime(time)
self.queued_time = (t_time - self.temp).total_seconds()
self.temp = t_time
def getIterable(dict_or_list):
"""
    Returns an iterable given a dictionary or a list.
"""
if isinstance(dict_or_list, dict):
iterable = list(dict_or_list.values())
elif isinstance(dict_or_list, list):
iterable = dict_or_list
else:
raise Exception("Unknown type '" + str(type(dict_or_list)) + "'")
return iterable
def groupByMsgType(messages):
"""
Returns a dictionary keyed by message type, with a list of one or
more message objects per message type.
"""
return groupByX(lambda x : x.getType(),
messages)
def groupBySource(messages):
"""
Returns a dictionary keyed by message source, with a list of one or
more message objects per message source.
"""
return groupByX(lambda x : x.getSource(),
messages)
def groupByX(grp_fn, messages, ignore_incomplete=False):
"""
Returns a dictionary keyed by the requested group.
"""
m_grp = {}
for msg in getIterable(messages):
# Ignore messages that we don't have all the timing for.
if msg.isComplete() or not ignore_incomplete:
m_type = grp_fn(msg)
if m_type in m_grp:
m_grp[m_type].append(msg)
else:
m_grp[m_type] = [msg]
return m_grp
def logTiming(basename, ignore_incomplete = False):
"""
Returns a dictionary of Message objects keyed by their ID number.
"""
zero_time = None
messages = {}
for ext in [".5", ".4", ".3", ".2", ".1", ""]:
fname = basename + ".out" + ext
if not os.path.exists(fname):
print(fname, "not found.")
continue
with open(fname) as fp:
for line in fp:
try:
[time, command] = map(lambda x: x.strip(), line.split(":hal4000:INFO:"))
except ValueError:
continue
if zero_time is None:
zero_time = time
# Message queued.
if (command.startswith("queued,")):
[m_id, source, m_type] = command.split(",")[1:]
messages[m_id] = Message(m_type = m_type,
source = source,
time = time,
zero_time = zero_time)
# Message sent.
elif (command.startswith("sent,")):
m_id = command.split(",")[1]
messages[m_id].sent(time)
# Message processed.
elif (command.startswith("processed,")):
m_id = command.split(",")[1]
messages[m_id].processed(time)
elif (command.startswith("worker done,")):
m_id = command.split(",")[1]
messages[m_id].incNWorkers()
# Ignore messages that we don't have all the timing for.
if not ignore_incomplete:
temp = {}
for m_id in messages:
msg = messages[m_id]
if msg.isComplete():
temp[m_id] = msg
return temp
else:
return messages
def processingTime(messages):
"""
Returns the total processing time for a collection of messages.
"""
accum_time = 0
for msg in getIterable(messages):
if isinstance(msg, list):
for elt in msg:
accum_time += elt.getProcessingTime()
else:
accum_time += msg.getProcessingTime()
return accum_time
def queuedTime(messages):
"""
    Returns the total queued time for a collection of messages.
"""
accum_time = 0
for msg in getIterable(messages):
if isinstance(msg, list):
for elt in msg:
accum_time += elt.getQueuedTime()
else:
accum_time += msg.getQueuedTime()
return accum_time
if (__name__ == "__main__"):
import sys
if (len(sys.argv) != 2):
print("usage: <log file>")
exit()
messages = logTiming(sys.argv[1])
groups = groupByMsgType(messages)
print()
print("All messages:")
for key in sorted(groups):
grp = groups[key]
print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
print("Total queued time {0:.3f} seconds".format(queuedTime(groups)))
print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
print()
print("Film messages:")
groups = groupByMsgType(groupBySource(messages)["film"])
for key in sorted(groups):
grp = groups[key]
print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
| #!/usr/bin/env python
"""
This parses a log file series (i.e. log, log.1, log.2, etc..) and
outputs timing and call frequency information for HAL messages.
Hazen 5/18
"""
from datetime import datetime
import os
pattern = '%Y-%m-%d %H:%M:%S,%f'
class Message(object):
"""
Storage for the timing of a single message.
"""
def __init__(self, m_type = None, source = None, time = None, zero_time = None, **kwds):
super().__init__(**kwds)
self.created_time = None
self.m_type = m_type
self.n_workers = 0
self.processing_time = None
self.queued_time = None
self.source = source
self.temp = self.parseTime(time)
self.created(zero_time)
def created(self, time):
t_time = self.parseTime(time)
self.created_time = (self.temp - t_time).total_seconds()
def getCreatedTime(self):
"""
        Returns the time when the message was created, relative to the first
        time in the log file, in seconds.
"""
return self.created_time
def getNWorkers(self):
"""
Return the number of workers (QRunnables) that were employed
to process this message.
"""
return self.n_workers
def getProcessingTime(self):
"""
Return time to process in seconds.
"""
return self.processing_time
def getQueuedTime(self):
"""
Return time queued in seconds.
"""
return self.queued_time
def getSource(self):
"""
Returns the source of a message.
"""
return self.source
def getType(self):
"""
Return the message type.
"""
return self.m_type
def incNWorkers(self):
self.n_workers += 1
def isComplete(self):
"""
Returns true if we have all the timing data for this message.
"""
        return self.processing_time is not None
def parseTime(self, time):
return datetime.strptime(time, pattern)
def processed(self, time):
t_time = self.parseTime(time)
self.processing_time = (t_time - self.temp).total_seconds()
def sent(self, time):
t_time = self.parseTime(time)
self.queued_time = (t_time - self.temp).total_seconds()
self.temp = t_time
def getIterable(dict_or_list):
"""
    Returns an iterable given a dictionary or a list.
"""
if isinstance(dict_or_list, dict):
iterable = list(dict_or_list.values())
elif isinstance(dict_or_list, list):
iterable = dict_or_list
else:
raise Exception("Unknown type '" + str(type(dict_or_list)) + "'")
return iterable
def groupByMsgType(messages):
"""
Returns a dictionary keyed by message type, with a list of one or
more message objects per message type.
"""
return groupByX(lambda x : x.getType(),
messages)
def groupBySource(messages):
"""
Returns a dictionary keyed by message source, with a list of one or
more message objects per message source.
"""
return groupByX(lambda x : x.getSource(),
messages)
def groupByX(grp_fn, messages, ignore_incomplete=False):
"""
Returns a dictionary keyed by the requested group.
"""
m_grp = {}
for msg in getIterable(messages):
# Ignore messages that we don't have all the timing for.
if msg.isComplete() or not ignore_incomplete:
m_type = grp_fn(msg)
if m_type in m_grp:
m_grp[m_type].append(msg)
else:
m_grp[m_type] = [msg]
return m_grp
def logTiming(basename, ignore_incomplete = False):
"""
Returns a dictionary of Message objects keyed by their ID number.
"""
zero_time = None
messages = {}
for ext in [".5", ".4", ".3", ".2", ".1", ""]:
fname = basename + ".out" + ext
if not os.path.exists(fname):
print(fname, "not found.")
continue
with open(fname) as fp:
for line in fp:
try:
[time, command] = map(lambda x: x.strip(), line.split(":hal4000:INFO:"))
except ValueError:
continue
if zero_time is None:
zero_time = time
# Message queued.
if (command.startswith("queued,")):
[m_id, source, m_type] = command.split(",")[1:]
messages[m_id] = Message(m_type = m_type,
source = source,
time = time,
zero_time = zero_time)
# Message sent.
elif (command.startswith("sent,")):
m_id = command.split(",")[1]
messages[m_id].sent(time)
# Message processed.
elif (command.startswith("processed,")):
m_id = command.split(",")[1]
messages[m_id].processed(time)
elif (command.startswith("worker done,")):
m_id = command.split(",")[1]
messages[m_id].incNWorkers()
# Ignore messages that we don't have all the timing for.
if not ignore_incomplete:
temp = {}
for m_id in messages:
msg = messages[m_id]
if msg.isComplete():
temp[m_id] = msg
return temp
else:
return messages
def processingTime(messages):
"""
Returns the total processing time for a collection of messages.
"""
accum_time = 0
for msg in getIterable(messages):
if isinstance(msg, list):
for elt in msg:
accum_time += elt.getProcessingTime()
else:
accum_time += msg.getProcessingTime()
return accum_time
def queuedTime(messages):
"""
    Returns the total queued time for a collection of messages.
"""
accum_time = 0
for msg in getIterable(messages):
if isinstance(msg, list):
for elt in msg:
accum_time += elt.getQueuedTime()
else:
accum_time += msg.getQueuedTime()
return accum_time
if (__name__ == "__main__"):
import sys
if (len(sys.argv) != 2):
print("usage: <log file>")
exit()
messages = logTiming(sys.argv[1])
groups = groupByMsgType(messages)
print()
print("All messages:")
for key in sorted(groups):
grp = groups[key]
print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
print("Total queued time {0:.3f} seconds".format(queuedTime(groups)))
print("Total processing time {0:.3f} seconds".format(processingTime(groups)))
print()
print("Film messages:")
groups = groupByMsgType(groupBySource(messages)["film"])
for key in sorted(groups):
grp = groups[key]
print(key + ", {0:0d} counts, {1:.3f} seconds".format(len(grp), processingTime(grp)))
print("Total processing time {0:.3f} seconds".format(processingTime(groups))) | en | 0.818304 | #!/usr/bin/env python This parses a log file series (i.e. log, log.1, log.2, etc..) and outputs timing and call frequency information for HAL messages. Hazen 5/18 Storage for the timing of a single message. Returns the time when the message was created relative to first time in the log file in seconds. Return the number of workers (QRunnables) that were employed to process this message. Return time to process in seconds. Return time queued in seconds. Returns the source of a message. Return the message type. Returns true if we have all the timing data for this message. Returns an iterable given a dictionary of a list. Returns a dictionary keyed by message type, with a list of one or more message objects per message type. Returns a dictionary keyed by message source, with a list of one or more message objects per message source. Returns a dictionary keyed by the requested group. # Ignore messages that we don't have all the timing for. Returns a dictionary of Message objects keyed by their ID number. # Message queued. # Message sent. # Message processed. # Ignore messages that we don't have all the timing for. Returns the total processing time for a collection of messages. Returns the total queued time for a a collection of messages. | 3.123125 | 3 |
django_simple_jsonschema/management/commands/check_schema.py | 38elements/django-simple-jsonschema | 1 | 420 | from django.core.management.base import BaseCommand
from django.utils import termcolors
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError
import json
class Command(BaseCommand):
can_import_settings = True
@property
def _jsonschema_exist(self):
from django.conf import settings
if not hasattr(settings, 'SIMPLE_JSONSCHEMA'):
return False
return True
@property
def _jsonschema_errors(self):
from django.conf import settings
errors = []
schemas = settings.SIMPLE_JSONSCHEMA
for url, schema in schemas.items():
try:
Draft4Validator.check_schema(schema)
except SchemaError as e:
errors.append({
'url': url,
'error': e,
'schema': json.dumps(schema, indent=4, sort_keys=True)
})
return errors
def handle(self, *args, **options):
success = termcolors.make_style(fg='green')
error = termcolors.make_style(fg='red')
if not self._jsonschema_exist:
            not_exist = '[' + error('ERROR') + '] SIMPLE_JSONSCHEMA does not exist in settings.'
self.stdout.write(not_exist)
return
errors = self._jsonschema_errors
if len(errors):
for e in errors:
title = '\n[' + error('ERROR') + '] schema of ' + str(e['url']) + ' is invalid.'
self.stdout.write(title)
self.stdout.write('path: ' + str(list(e['error'].path)))
self.stdout.write('message: ' + e['error'].message)
self.stdout.write('schema:\n' + e['schema'] + '\n')
else:
self.stdout.write('[' + success('SUCCESS') + '] All jsonschemas are OK.')
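# A sketch of the kind of setting this command validates (illustrative only;
# the key format is whatever the project uses to route schemas to views, and
# this command only cares that each *value* is a valid Draft 4 JSON schema):
#
# SIMPLE_JSONSCHEMA = {
#     ('post', 'myapp:item_create'): {
#         'type': 'object',
#         'properties': {'name': {'type': 'string'}},
#         'required': ['name'],
#     },
# }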
| from django.core.management.base import BaseCommand
from django.utils import termcolors
from jsonschema import Draft4Validator
from jsonschema.exceptions import SchemaError
import json
class Command(BaseCommand):
can_import_settings = True
@property
def _jsonschema_exist(self):
from django.conf import settings
if not hasattr(settings, 'SIMPLE_JSONSCHEMA'):
return False
return True
@property
def _jsonschema_errors(self):
from django.conf import settings
errors = []
schemas = settings.SIMPLE_JSONSCHEMA
for url, schema in schemas.items():
try:
Draft4Validator.check_schema(schema)
except SchemaError as e:
errors.append({
'url': url,
'error': e,
'schema': json.dumps(schema, indent=4, sort_keys=True)
})
return errors
def handle(self, *args, **options):
success = termcolors.make_style(fg='green')
error = termcolors.make_style(fg='red')
if not self._jsonschema_exist:
            not_exist = '[' + error('ERROR') + '] SIMPLE_JSONSCHEMA does not exist in settings.'
self.stdout.write(not_exist)
return
errors = self._jsonschema_errors
if len(errors):
for e in errors:
title = '\n[' + error('ERROR') + '] schema of ' + str(e['url']) + ' is invalid.'
self.stdout.write(title)
self.stdout.write('path: ' + str(list(e['error'].path)))
self.stdout.write('message: ' + e['error'].message)
self.stdout.write('schema:\n' + e['schema'] + '\n')
else:
self.stdout.write('[' + success('SUCCESS') + '] All jsonschemas are OK.')
| none | 1 | 2.234132 | 2 |
|
lib/interface.py | keke185321/combine-copy- | 0 | 421 | import cv2, time
import numpy as np
import Tkinter
from PIL import Image, ImageTk
"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).
If desired, an alternative UI could be built and imported into get_pulse.py
instead. Opencv is used to perform much of the data analysis, but there is no
reason it has to be used to handle the UI as well. It just happens to be very
effective for our purposes.
"""
def resize(*args, **kwargs):
return cv2.resize(*args, **kwargs)
def moveWindow(*args,**kwargs):
return
def imshow(root, name, frame):
    # Convert the BGR OpenCV frame into a Tkinter-compatible image and show it
    # in a Label widget attached to the given Tk root.
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(image)
    image = ImageTk.PhotoImage(image)
    label = Tkinter.Label(root, image=image)
    label.image = image  # keep a reference so the photo is not garbage collected
    return label.pack()
    #return cv2.imshow(*args,**kwargs)
def destroyWindow(*args,**kwargs):
return cv2.destroyWindow(*args,**kwargs)
def waitKey(*args,**kwargs):
return cv2.waitKey(*args,**kwargs)
"""
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.
This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""
def combine(left, right):
"""Stack images horizontally.
"""
h = max(left.shape[0], right.shape[0])
w = left.shape[1] + right.shape[1]
hoff = left.shape[0]
shape = list(left.shape)
shape[0] = h
shape[1] = w
comb = np.zeros(tuple(shape),left.dtype)
# left will be on left, aligned top, with right on right
comb[:left.shape[0],:left.shape[1]] = left
comb[:right.shape[0],left.shape[1]:] = right
return comb
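# Quick sanity check of the shape arithmetic above (a worked example, not part
# of the original code): combining a 100x50 frame with an 80x60 frame gives a
# 100x110 canvas; each input is aligned to the top of its own half and any
# unused rows are left as zeros (black).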
def plotXY(data,size = (280,640),margin = 25,name = "data",labels=[], skip = [],
showmax = [], bg = None,label_ndigits = [], showmax_digits=[]):
for x,y in data:
if len(x) < 2 or len(y) < 2:
return
n_plots = len(data)
w = float(size[1])
h = size[0]/float(n_plots)
z = np.zeros((size[0],size[1],3))
if isinstance(bg,np.ndarray):
wd = int(bg.shape[1]/bg.shape[0]*h )
bg = cv2.resize(bg,(wd,int(h)))
if len(bg.shape) == 3:
r = combine(bg[:,:,0],z[:,:,0])
g = combine(bg[:,:,1],z[:,:,1])
b = combine(bg[:,:,2],z[:,:,2])
else:
r = combine(bg,z[:,:,0])
g = combine(bg,z[:,:,1])
b = combine(bg,z[:,:,2])
z = cv2.merge([r,g,b])[:,:-wd,]
i = 0
P = []
for x,y in data:
x = np.array(x)
y = -np.array(y)
xx = (w-2*margin)*(x - x.min()) / (x.max() - x.min())+margin
yy = (h-2*margin)*(y - y.min()) / (y.max() - y.min())+margin + i*h
mx = max(yy)
if labels:
if labels[i]:
for ii in range(len(x)):
if ii%skip[i] == 0:
col = (255,255,255)
ss = '{0:.%sf}' % label_ndigits[i]
ss = ss.format(x[ii])
cv2.putText(z,ss,(int(xx[ii]),int((i+1)*h)),
cv2.FONT_HERSHEY_PLAIN,1,col)
if showmax:
if showmax[i]:
col = (0,255,0)
ii = np.argmax(-y)
ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
ss = ss.format(x[ii])
#"%0.0f %s" % (x[ii], showmax[i])
cv2.putText(z,ss,(int(xx[ii]),int((yy[ii]))),
cv2.FONT_HERSHEY_PLAIN,2,col)
try:
pts = np.array([[x_, y_] for x_, y_ in zip(xx,yy)],np.int32)
i+=1
P.append(pts)
except ValueError:
pass #temporary
"""
#Polylines seems to have some trouble rendering multiple polys for some people
for p in P:
cv2.polylines(z, [p], False, (255,255,255),1)
"""
#hack-y alternative:
for p in P:
for i in range(len(p)-1):
cv2.line(z,tuple(p[i]),tuple(p[i+1]), (255,255,255),1)
return z
#cv2.imshow(name,z)
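# A minimal usage sketch for plotXY (hypothetical data, not taken from the
# pulse-measurement application itself):
#
#   xs = list(range(100))
#   ys = list(np.sin(np.linspace(0, 10, 100)))
#   frame = plotXY([[xs, ys]], size=(280, 640), labels=[True], skip=[10],
#                  showmax=["bpm"], label_ndigits=[0], showmax_digits=[0])
#   cv2.imshow("data", frame)
#   cv2.waitKey(0)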
| import cv2, time
import numpy as np
import Tkinter
from PIL import Image, ImageTk
"""
Wraps up some interfaces to opencv user interface methods (displaying
image frames, event handling, etc).
If desired, an alternative UI could be built and imported into get_pulse.py
instead. Opencv is used to perform much of the data analysis, but there is no
reason it has to be used to handle the UI as well. It just happens to be very
effective for our purposes.
"""
def resize(*args, **kwargs):
return cv2.resize(*args, **kwargs)
def moveWindow(*args,**kwargs):
return
def imshow(root, name, frame):
    # Convert the BGR OpenCV frame into a Tkinter-compatible image and show it
    # in a Label widget attached to the given Tk root.
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(image)
    image = ImageTk.PhotoImage(image)
    label = Tkinter.Label(root, image=image)
    label.image = image  # keep a reference so the photo is not garbage collected
    return label.pack()
    #return cv2.imshow(*args,**kwargs)
def destroyWindow(*args,**kwargs):
return cv2.destroyWindow(*args,**kwargs)
def waitKey(*args,**kwargs):
return cv2.waitKey(*args,**kwargs)
"""
The rest of this file defines some GUI plotting functionality. There are plenty
of other ways to do simple x-y data plots in python, but this application uses
cv2.imshow to do real-time data plotting and handle user interaction.
This is entirely independent of the data calculation functions, so it can be
replaced in the get_pulse.py application easily.
"""
def combine(left, right):
"""Stack images horizontally.
"""
h = max(left.shape[0], right.shape[0])
w = left.shape[1] + right.shape[1]
hoff = left.shape[0]
shape = list(left.shape)
shape[0] = h
shape[1] = w
comb = np.zeros(tuple(shape),left.dtype)
# left will be on left, aligned top, with right on right
comb[:left.shape[0],:left.shape[1]] = left
comb[:right.shape[0],left.shape[1]:] = right
return comb
def plotXY(data,size = (280,640),margin = 25,name = "data",labels=[], skip = [],
showmax = [], bg = None,label_ndigits = [], showmax_digits=[]):
for x,y in data:
if len(x) < 2 or len(y) < 2:
return
n_plots = len(data)
w = float(size[1])
h = size[0]/float(n_plots)
z = np.zeros((size[0],size[1],3))
if isinstance(bg,np.ndarray):
wd = int(bg.shape[1]/bg.shape[0]*h )
bg = cv2.resize(bg,(wd,int(h)))
if len(bg.shape) == 3:
r = combine(bg[:,:,0],z[:,:,0])
g = combine(bg[:,:,1],z[:,:,1])
b = combine(bg[:,:,2],z[:,:,2])
else:
r = combine(bg,z[:,:,0])
g = combine(bg,z[:,:,1])
b = combine(bg,z[:,:,2])
z = cv2.merge([r,g,b])[:,:-wd,]
i = 0
P = []
for x,y in data:
x = np.array(x)
y = -np.array(y)
xx = (w-2*margin)*(x - x.min()) / (x.max() - x.min())+margin
yy = (h-2*margin)*(y - y.min()) / (y.max() - y.min())+margin + i*h
mx = max(yy)
if labels:
if labels[i]:
for ii in range(len(x)):
if ii%skip[i] == 0:
col = (255,255,255)
ss = '{0:.%sf}' % label_ndigits[i]
ss = ss.format(x[ii])
cv2.putText(z,ss,(int(xx[ii]),int((i+1)*h)),
cv2.FONT_HERSHEY_PLAIN,1,col)
if showmax:
if showmax[i]:
col = (0,255,0)
ii = np.argmax(-y)
ss = '{0:.%sf} %s' % (showmax_digits[i], showmax[i])
ss = ss.format(x[ii])
#"%0.0f %s" % (x[ii], showmax[i])
cv2.putText(z,ss,(int(xx[ii]),int((yy[ii]))),
cv2.FONT_HERSHEY_PLAIN,2,col)
try:
pts = np.array([[x_, y_] for x_, y_ in zip(xx,yy)],np.int32)
i+=1
P.append(pts)
except ValueError:
pass #temporary
"""
#Polylines seems to have some trouble rendering multiple polys for some people
for p in P:
cv2.polylines(z, [p], False, (255,255,255),1)
"""
#hack-y alternative:
for p in P:
for i in range(len(p)-1):
cv2.line(z,tuple(p[i]),tuple(p[i+1]), (255,255,255),1)
return z
#cv2.imshow(name,z)
| en | 0.884852 | Wraps up some interfaces to opencv user interface methods (displaying image frames, event handling, etc). If desired, an alternative UI could be built and imported into get_pulse.py instead. Opencv is used to perform much of the data analysis, but there is no reason it has to be used to handle the UI as well. It just happens to be very effective for our purposes. #return cv2.imshow(*args,**kwargs) The rest of this file defines some GUI plotting functionality. There are plenty of other ways to do simple x-y data plots in python, but this application uses cv2.imshow to do real-time data plotting and handle user interaction. This is entirely independent of the data calculation functions, so it can be replaced in the get_pulse.py application easily. Stack images horizontally. # left will be on left, aligned top, with right on right #"%0.0f %s" % (x[ii], showmax[i]) #temporary #Polylines seems to have some trouble rendering multiple polys for some people for p in P: cv2.polylines(z, [p], False, (255,255,255),1) #hack-y alternative: #cv2.imshow(name,z) | 3.134204 | 3 |
nltk/tag/brill.py | FGDBTKD/nltk | 0 | 422 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2018 NLTK Project
# Author: <NAME> <<EMAIL>>
# based on previous (nltk2) version by
# <NAME>, <NAME>, <NAME>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division
from collections import defaultdict, Counter
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags
######################################################################
# Brill Templates
######################################################################
@jsontags.register_tag
class Word(Feature):
"""
Feature which examines the text (word) of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Word'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's text."""
return tokens[index][0]
@jsontags.register_tag
class Pos(Feature):
"""
Feature which examines the tags of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Pos'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's tag."""
return tokens[index][1]
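# For illustration (a small sketch, not part of the original module): given
# tokens = [('The', 'DT'), ('cat', 'NN'), ('sat', 'VBD')],
# Word.extract_property(tokens, 1) returns 'cat' and
# Pos.extract_property(tokens, 1) returns 'NN'. A multi-feature template such
# as Template(Pos([-1]), Word([1])) therefore generates rules whose condition
# is a joint constraint on the tag one position to the left and the word one
# position to the right of the token being retagged.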
def nltkdemo18():
"""
Return 18 templates, from the original nltk demo, in multi-feature syntax
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-3, -2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-1]), Word([1])),
]
def nltkdemo18plus():
"""
Return 18 templates, from the original nltk demo, and additionally a few
multi-feature ones (the motivation is easy comparison with nltkdemo18)
"""
return nltkdemo18() + [
Template(Word([-1]), Pos([1])),
Template(Pos([-1]), Word([1])),
Template(Word([-1]), Word([0]), Pos([1])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-1]), Word([0]), Pos([1])),
]
def fntbl37():
"""
Return 37 templates taken from the postagging task of the
fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/
(37 is after excluding a handful which do not condition on Pos[0];
fntbl can do that but the current nltk implementation cannot.)
"""
return [
Template(Word([0]), Word([1]), Word([2])),
Template(Word([-1]), Word([0]), Word([1])),
Template(Word([0]), Word([-1])),
Template(Word([0]), Word([1])),
Template(Word([0]), Word([2])),
Template(Word([0]), Word([-2])),
Template(Word([1, 2])),
Template(Word([-2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-3, -2, -1])),
Template(Word([0]), Pos([2])),
Template(Word([0]), Pos([-2])),
Template(Word([0]), Pos([1])),
Template(Word([0]), Pos([-1])),
Template(Word([0])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([1])),
Template(Word([-1])),
Template(Pos([-1]), Pos([1])),
Template(Pos([1]), Pos([2])),
Template(Pos([-1]), Pos([-2])),
Template(Pos([1])),
Template(Pos([-1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([1, 2, 3])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([-2, -1])),
Template(Pos([1]), Word([0]), Word([1])),
Template(Pos([1]), Word([0]), Word([-1])),
Template(Pos([-1]), Word([-1]), Word([0])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Pos([1]), Pos([2]), Word([1]))
]
def brill24():
"""
Return 24 templates of the seminal TBL paper, Brill (1995)
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-1, 0])),
Template(Word([0, 1])),
Template(Word([0])),
Template(Word([-1]), Pos([-1])),
Template(Word([1]), Pos([1])),
Template(Word([0]), Word([-1]), Pos([-1])),
Template(Word([0]), Word([1]), Pos([1])),
]
def describe_template_sets():
"""
    Print the available template sets in this demo, with a short description.
"""
import inspect
import sys
# a bit of magic to get all functions in this module
templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for (name, obj) in templatesets:
if name == "describe_template_sets":
continue
print(name, obj.__doc__, "\n")
######################################################################
# The Brill Tagger
######################################################################
@jsontags.register_tag
class BrillTagger(TaggerI):
"""
Brill's transformational rule-based tagger. Brill taggers use an
initial tagger (such as ``tag.DefaultTagger``) to assign an initial
tag sequence to a text; and then apply an ordered list of
transformational rules to correct the tags of individual tokens.
These transformation rules are specified by the ``TagRule``
interface.
Brill taggers can be created directly, from an initial tagger and
a list of transformational rules; but more often, Brill taggers
are created by learning rules from a training corpus, using one
of the TaggerTrainers available.
"""
json_tag = 'nltk.tag.BrillTagger'
def __init__(self, initial_tagger, rules, training_stats=None):
"""
:param initial_tagger: The initial tagger
:type initial_tagger: TaggerI
:param rules: An ordered list of transformation rules that
should be used to correct the initial tagging.
:type rules: list(TagRule)
:param training_stats: A dictionary of statistics collected
during training, for possible later use
:type training_stats: dict
"""
self._initial_tagger = initial_tagger
self._rules = tuple(rules)
self._training_stats = training_stats
def encode_json_obj(self):
return self._initial_tagger, self._rules, self._training_stats
@classmethod
def decode_json_obj(cls, obj):
_initial_tagger, _rules, _training_stats = obj
return cls(_initial_tagger, _rules, _training_stats)
def rules(self):
"""
Return the ordered list of transformation rules that this tagger has learnt
:return: the ordered list of transformation rules that correct the initial tagging
:rtype: list of Rules
"""
return self._rules
def train_stats(self, statistic=None):
"""
Return a named statistic collected during training, or a dictionary of all
available statistics if no name given
:param statistic: name of statistic
:type statistic: str
:return: some statistic collected during training of this tagger
:rtype: any (but usually a number)
"""
if statistic is None:
return self._training_stats
else:
return self._training_stats.get(statistic)
def tag(self, tokens):
# Inherit documentation from TaggerI
# Run the initial tagger.
tagged_tokens = self._initial_tagger.tag(tokens)
# Create a dictionary that maps each tag to a list of the
# indices of tokens that have that tag.
tag_to_positions = defaultdict(set)
for i, (token, tag) in enumerate(tagged_tokens):
tag_to_positions[tag].add(i)
# Apply each rule, in order. Only try to apply rules at
# positions that have the desired original tag.
for rule in self._rules:
# Find the positions where it might apply
positions = tag_to_positions.get(rule.original_tag, [])
# Apply the rule at those positions.
changed = rule.apply(tagged_tokens, positions)
# Update tag_to_positions with the positions of tags that
# were modified.
for i in changed:
tag_to_positions[rule.original_tag].remove(i)
tag_to_positions[rule.replacement_tag].add(i)
return tagged_tokens
def print_template_statistics(self, test_stats=None, printunused=True):
"""
Print a list of all templates, ranked according to efficiency.
If test_stats is available, the templates are ranked according to their
relative contribution (summed for all rules created from a given template,
weighted by score) to the performance on the test set. If no test_stats, then
statistics collected during training are used instead. There is also
an unweighted measure (just counting the rules). This is less informative,
though, as many low-score rules will appear towards end of training.
:param test_stats: dictionary of statistics collected during testing
:type test_stats: dict of str -> any (but usually numbers)
:param printunused: if True, print a list of all unused templates
:type printunused: bool
:return: None
:rtype: None
"""
tids = [r.templateid for r in self._rules]
train_stats = self.train_stats()
trainscores = train_stats['rulescores']
assert len(trainscores) == len(tids), "corrupt statistics: " \
"{0} train scores for {1} rules".format(trainscores, tids)
template_counts = Counter(tids)
weighted_traincounts = Counter()
for (tid, score) in zip(tids, trainscores):
weighted_traincounts[tid] += score
tottrainscores = sum(trainscores)
# det_tplsort() is for deterministic sorting;
# the otherwise convenient Counter.most_common() unfortunately
# does not break ties deterministically
# between python versions and will break cross-version tests
def det_tplsort(tpl_value):
return (tpl_value[1], repr(tpl_value[0]))
def print_train_stats():
print("TEMPLATE STATISTICS (TRAIN) {0} templates, {1} rules)".format(
len(template_counts),
len(tids))
)
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
head = "#ID | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
train_tplscores = sorted(weighted_traincounts.items(), key=det_tplsort, reverse=True)
for (tid, trainscore) in train_tplscores:
s = "{0} | {1:5d} {2:5.3f} |{3:4d} {4:.3f} | {5}".format(
tid,
trainscore,
trainscore/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_testtrain_stats():
testscores = test_stats['rulescores']
print("TEMPLATE STATISTICS (TEST AND TRAIN) ({0} templates, {1} rules)".format(
len(template_counts),
len(tids)),
)
print("TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats))
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
weighted_testcounts = Counter()
for (tid, score) in zip(tids, testscores):
weighted_testcounts[tid] += score
tottestscores = sum(testscores)
head = "#ID | Score (test) | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
test_tplscores = sorted(weighted_testcounts.items(), key=det_tplsort, reverse=True)
for (tid, testscore) in test_tplscores:
s = "{0:s} |{1:5d} {2:6.3f} | {3:4d} {4:.3f} |{5:4d} {6:.3f} | {7:s}".format(
tid,
testscore,
testscore/tottestscores,
weighted_traincounts[tid],
weighted_traincounts[tid]/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_unused_templates():
usedtpls = set(int(tid) for tid in tids)
unused = [(tid, tpl) for (tid, tpl) in enumerate(Template.ALLTEMPLATES) if tid not in usedtpls]
print("UNUSED TEMPLATES ({0})".format(len(unused)))
for (tid, tpl) in unused:
print("{0:03d} {1:s}".format(tid, str(tpl)))
if test_stats is None:
print_train_stats()
else:
print_testtrain_stats()
print()
if printunused:
print_unused_templates()
print()
def batch_tag_incremental(self, sequences, gold):
"""
Tags by applying each rule to the entire corpus (rather than all rules to a
single sequence). The point is to collect statistics on the test set for
individual rules.
NOTE: This is inefficient (does not build any index, so will traverse the entire
corpus N times for N rules) -- usually you would not care about statistics for
individual rules and thus use batch_tag() instead
:param sequences: lists of token sequences (sentences, in some applications) to be tagged
:type sequences: list of list of strings
:param gold: the gold standard
:type gold: list of list of strings
:returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
"""
def counterrors(xs):
return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
testing_stats = {}
testing_stats['tokencount'] = sum(len(t) for t in sequences)
testing_stats['sequencecount'] = len(sequences)
tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
testing_stats['initialerrors'] = counterrors(tagged_tokenses)
testing_stats['initialacc'] = 1 - testing_stats['initialerrors']/testing_stats['tokencount']
# Apply each rule to the entire corpus, in order
errors = [testing_stats['initialerrors']]
for rule in self._rules:
for tagged_tokens in tagged_tokenses:
rule.apply(tagged_tokens)
errors.append(counterrors(tagged_tokenses))
testing_stats['rulescores'] = [err0 - err1 for (err0, err1) in zip(errors, errors[1:])]
testing_stats['finalerrors'] = errors[-1]
testing_stats['finalacc'] = 1 - testing_stats['finalerrors']/testing_stats['tokencount']
return (tagged_tokenses, testing_stats)
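# A minimal end-to-end sketch (assuming the companion BrillTaggerTrainer from
# nltk.tag.brill_trainer and a tagged training corpus train_sents; illustrative
# only, not part of this module):
#
#   from nltk.tag import UnigramTagger
#   from nltk.tag.brill_trainer import BrillTaggerTrainer
#
#   baseline = UnigramTagger(train_sents)
#   trainer = BrillTaggerTrainer(baseline, brill24(), trace=1)
#   tagger = trainer.train(train_sents, max_rules=100)
#   tagger.tag(['The', 'cat', 'sat', 'on', 'the', 'mat'])
#   tagger.print_template_statistics()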
| # -*- coding: utf-8 -*-
# Natural Language Toolkit: Transformation-based learning
#
# Copyright (C) 2001-2018 NLTK Project
# Author: <NAME> <<EMAIL>>
# based on previous (nltk2) version by
# <NAME>, <NAME>, <NAME>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division
from collections import defaultdict, Counter
from nltk.tag import TaggerI
from nltk.tbl import Feature, Template
from nltk import jsontags
######################################################################
# Brill Templates
######################################################################
@jsontags.register_tag
class Word(Feature):
"""
Feature which examines the text (word) of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Word'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's text."""
return tokens[index][0]
@jsontags.register_tag
class Pos(Feature):
"""
Feature which examines the tags of nearby tokens.
"""
json_tag = 'nltk.tag.brill.Pos'
@staticmethod
def extract_property(tokens, index):
"""@return: The given token's tag."""
return tokens[index][1]
def nltkdemo18():
"""
Return 18 templates, from the original nltk demo, in multi-feature syntax
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-3, -2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-1]), Word([1])),
]
def nltkdemo18plus():
"""
Return 18 templates, from the original nltk demo, and additionally a few
multi-feature ones (the motivation is easy comparison with nltkdemo18)
"""
return nltkdemo18() + [
Template(Word([-1]), Pos([1])),
Template(Pos([-1]), Word([1])),
Template(Word([-1]), Word([0]), Pos([1])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-1]), Word([0]), Pos([1])),
]
def fntbl37():
"""
Return 37 templates taken from the postagging task of the
fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/
(37 is after excluding a handful which do not condition on Pos[0];
fntbl can do that but the current nltk implementation cannot.)
"""
return [
Template(Word([0]), Word([1]), Word([2])),
Template(Word([-1]), Word([0]), Word([1])),
Template(Word([0]), Word([-1])),
Template(Word([0]), Word([1])),
Template(Word([0]), Word([2])),
Template(Word([0]), Word([-2])),
Template(Word([1, 2])),
Template(Word([-2, -1])),
Template(Word([1, 2, 3])),
Template(Word([-3, -2, -1])),
Template(Word([0]), Pos([2])),
Template(Word([0]), Pos([-2])),
Template(Word([0]), Pos([1])),
Template(Word([0]), Pos([-1])),
Template(Word([0])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([1])),
Template(Word([-1])),
Template(Pos([-1]), Pos([1])),
Template(Pos([1]), Pos([2])),
Template(Pos([-1]), Pos([-2])),
Template(Pos([1])),
Template(Pos([-1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([1, 2, 3])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([-2, -1])),
Template(Pos([1]), Word([0]), Word([1])),
Template(Pos([1]), Word([0]), Word([-1])),
Template(Pos([-1]), Word([-1]), Word([0])),
Template(Pos([-1]), Word([0]), Word([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Pos([1]), Pos([2]), Word([1]))
]
def brill24():
"""
Return 24 templates of the seminal TBL paper, Brill (1995)
"""
return [
Template(Pos([-1])),
Template(Pos([1])),
Template(Pos([-2])),
Template(Pos([2])),
Template(Pos([-2, -1])),
Template(Pos([1, 2])),
Template(Pos([-3, -2, -1])),
Template(Pos([1, 2, 3])),
Template(Pos([-1]), Pos([1])),
Template(Pos([-2]), Pos([-1])),
Template(Pos([1]), Pos([2])),
Template(Word([-1])),
Template(Word([1])),
Template(Word([-2])),
Template(Word([2])),
Template(Word([-2, -1])),
Template(Word([1, 2])),
Template(Word([-1, 0])),
Template(Word([0, 1])),
Template(Word([0])),
Template(Word([-1]), Pos([-1])),
Template(Word([1]), Pos([1])),
Template(Word([0]), Word([-1]), Pos([-1])),
Template(Word([0]), Word([1]), Pos([1])),
]
def describe_template_sets():
"""
    Print the available template sets in this demo, with a short description.
"""
import inspect
import sys
# a bit of magic to get all functions in this module
templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction)
for (name, obj) in templatesets:
if name == "describe_template_sets":
continue
print(name, obj.__doc__, "\n")
######################################################################
# The Brill Tagger
######################################################################
@jsontags.register_tag
class BrillTagger(TaggerI):
"""
Brill's transformational rule-based tagger. Brill taggers use an
initial tagger (such as ``tag.DefaultTagger``) to assign an initial
tag sequence to a text; and then apply an ordered list of
transformational rules to correct the tags of individual tokens.
These transformation rules are specified by the ``TagRule``
interface.
Brill taggers can be created directly, from an initial tagger and
a list of transformational rules; but more often, Brill taggers
are created by learning rules from a training corpus, using one
of the TaggerTrainers available.
"""
json_tag = 'nltk.tag.BrillTagger'
def __init__(self, initial_tagger, rules, training_stats=None):
"""
:param initial_tagger: The initial tagger
:type initial_tagger: TaggerI
:param rules: An ordered list of transformation rules that
should be used to correct the initial tagging.
:type rules: list(TagRule)
:param training_stats: A dictionary of statistics collected
during training, for possible later use
:type training_stats: dict
"""
self._initial_tagger = initial_tagger
self._rules = tuple(rules)
self._training_stats = training_stats
def encode_json_obj(self):
return self._initial_tagger, self._rules, self._training_stats
@classmethod
def decode_json_obj(cls, obj):
_initial_tagger, _rules, _training_stats = obj
return cls(_initial_tagger, _rules, _training_stats)
def rules(self):
"""
Return the ordered list of transformation rules that this tagger has learnt
:return: the ordered list of transformation rules that correct the initial tagging
:rtype: list of Rules
"""
return self._rules
def train_stats(self, statistic=None):
"""
Return a named statistic collected during training, or a dictionary of all
available statistics if no name given
:param statistic: name of statistic
:type statistic: str
:return: some statistic collected during training of this tagger
:rtype: any (but usually a number)
"""
if statistic is None:
return self._training_stats
else:
return self._training_stats.get(statistic)
def tag(self, tokens):
# Inherit documentation from TaggerI
# Run the initial tagger.
tagged_tokens = self._initial_tagger.tag(tokens)
# Create a dictionary that maps each tag to a list of the
# indices of tokens that have that tag.
tag_to_positions = defaultdict(set)
for i, (token, tag) in enumerate(tagged_tokens):
tag_to_positions[tag].add(i)
# Apply each rule, in order. Only try to apply rules at
# positions that have the desired original tag.
for rule in self._rules:
# Find the positions where it might apply
positions = tag_to_positions.get(rule.original_tag, [])
# Apply the rule at those positions.
changed = rule.apply(tagged_tokens, positions)
# Update tag_to_positions with the positions of tags that
# were modified.
for i in changed:
tag_to_positions[rule.original_tag].remove(i)
tag_to_positions[rule.replacement_tag].add(i)
return tagged_tokens
def print_template_statistics(self, test_stats=None, printunused=True):
"""
Print a list of all templates, ranked according to efficiency.
If test_stats is available, the templates are ranked according to their
relative contribution (summed for all rules created from a given template,
weighted by score) to the performance on the test set. If no test_stats, then
statistics collected during training are used instead. There is also
an unweighted measure (just counting the rules). This is less informative,
        though, as many low-score rules will appear towards the end of training.
:param test_stats: dictionary of statistics collected during testing
:type test_stats: dict of str -> any (but usually numbers)
:param printunused: if True, print a list of all unused templates
:type printunused: bool
:return: None
:rtype: None
"""
tids = [r.templateid for r in self._rules]
train_stats = self.train_stats()
trainscores = train_stats['rulescores']
assert len(trainscores) == len(tids), "corrupt statistics: " \
"{0} train scores for {1} rules".format(trainscores, tids)
template_counts = Counter(tids)
weighted_traincounts = Counter()
for (tid, score) in zip(tids, trainscores):
weighted_traincounts[tid] += score
tottrainscores = sum(trainscores)
# det_tplsort() is for deterministic sorting;
# the otherwise convenient Counter.most_common() unfortunately
# does not break ties deterministically
# between python versions and will break cross-version tests
def det_tplsort(tpl_value):
return (tpl_value[1], repr(tpl_value[0]))
def print_train_stats():
print("TEMPLATE STATISTICS (TRAIN) {0} templates, {1} rules)".format(
len(template_counts),
len(tids))
)
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
head = "#ID | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
train_tplscores = sorted(weighted_traincounts.items(), key=det_tplsort, reverse=True)
for (tid, trainscore) in train_tplscores:
s = "{0} | {1:5d} {2:5.3f} |{3:4d} {4:.3f} | {5}".format(
tid,
trainscore,
trainscore/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_testtrain_stats():
testscores = test_stats['rulescores']
print("TEMPLATE STATISTICS (TEST AND TRAIN) ({0} templates, {1} rules)".format(
len(template_counts),
len(tids)),
)
print("TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats))
print("TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} "
"final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats))
weighted_testcounts = Counter()
for (tid, score) in zip(tids, testscores):
weighted_testcounts[tid] += score
tottestscores = sum(testscores)
head = "#ID | Score (test) | Score (train) | #Rules | Template"
print(head, "\n", "-" * len(head), sep="")
test_tplscores = sorted(weighted_testcounts.items(), key=det_tplsort, reverse=True)
for (tid, testscore) in test_tplscores:
s = "{0:s} |{1:5d} {2:6.3f} | {3:4d} {4:.3f} |{5:4d} {6:.3f} | {7:s}".format(
tid,
testscore,
testscore/tottestscores,
weighted_traincounts[tid],
weighted_traincounts[tid]/tottrainscores,
template_counts[tid],
template_counts[tid]/len(tids),
Template.ALLTEMPLATES[int(tid)],
)
print(s)
def print_unused_templates():
usedtpls = set(int(tid) for tid in tids)
unused = [(tid, tpl) for (tid, tpl) in enumerate(Template.ALLTEMPLATES) if tid not in usedtpls]
print("UNUSED TEMPLATES ({0})".format(len(unused)))
for (tid, tpl) in unused:
print("{0:03d} {1:s}".format(tid, str(tpl)))
if test_stats is None:
print_train_stats()
else:
print_testtrain_stats()
print()
if printunused:
print_unused_templates()
print()
def batch_tag_incremental(self, sequences, gold):
"""
Tags by applying each rule to the entire corpus (rather than all rules to a
single sequence). The point is to collect statistics on the test set for
individual rules.
NOTE: This is inefficient (does not build any index, so will traverse the entire
corpus N times for N rules) -- usually you would not care about statistics for
individual rules and thus use batch_tag() instead
:param sequences: lists of token sequences (sentences, in some applications) to be tagged
:type sequences: list of list of strings
:param gold: the gold standard
:type gold: list of list of strings
:returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule))
"""
def counterrors(xs):
return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair))
testing_stats = {}
testing_stats['tokencount'] = sum(len(t) for t in sequences)
testing_stats['sequencecount'] = len(sequences)
tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences]
testing_stats['initialerrors'] = counterrors(tagged_tokenses)
testing_stats['initialacc'] = 1 - testing_stats['initialerrors']/testing_stats['tokencount']
# Apply each rule to the entire corpus, in order
errors = [testing_stats['initialerrors']]
for rule in self._rules:
for tagged_tokens in tagged_tokenses:
rule.apply(tagged_tokens)
errors.append(counterrors(tagged_tokenses))
testing_stats['rulescores'] = [err0 - err1 for (err0, err1) in zip(errors, errors[1:])]
testing_stats['finalerrors'] = errors[-1]
testing_stats['finalacc'] = 1 - testing_stats['finalerrors']/testing_stats['tokencount']
return (tagged_tokenses, testing_stats)
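# Usage sketch: training a Brill tagger with the nltkdemo18() templates above and a
# unigram initial tagger. The treebank slice and rule count are arbitrary illustrative
# values, and the treebank sample is assumed to be available locally.
def _demo_train_brill_tagger():
    from nltk.corpus import treebank
    from nltk.tag import UnigramTagger
    from nltk.tag.brill_trainer import BrillTaggerTrainer

    train_sents = treebank.tagged_sents()[:500]
    initial = UnigramTagger(train_sents)
    trainer = BrillTaggerTrainer(initial, nltkdemo18(), trace=0)
    tagger = trainer.train(train_sents, max_rules=10)
    tagger.print_template_statistics(printunused=False)
    return tagger.rules(), tagger.train_stats('rulescores')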
| en | 0.803401 | # -*- coding: utf-8 -*- # Natural Language Toolkit: Transformation-based learning # # Copyright (C) 2001-2018 NLTK Project # Author: <NAME> <<EMAIL>> # based on previous (nltk2) version by # <NAME>, <NAME>, <NAME> # URL: <http://nltk.org/> # For license information, see LICENSE.TXT ###################################################################### # Brill Templates ###################################################################### Feature which examines the text (word) of nearby tokens. @return: The given token's text. Feature which examines the tags of nearby tokens. @return: The given token's tag. Return 18 templates, from the original nltk demo, in multi-feature syntax Return 18 templates, from the original nltk demo, and additionally a few multi-feature ones (the motivation is easy comparison with nltkdemo18) Return 37 templates taken from the postagging task of the fntbl distribution http://www.cs.jhu.edu/~rflorian/fntbl/ (37 is after excluding a handful which do not condition on Pos[0]; fntbl can do that but the current nltk implementation cannot.) Return 24 templates of the seminal TBL paper, Brill (1995) Print the available template sets in this demo, with a short description" # a bit of magic to get all functions in this module ###################################################################### # The Brill Tagger ###################################################################### Brill's transformational rule-based tagger. Brill taggers use an initial tagger (such as ``tag.DefaultTagger``) to assign an initial tag sequence to a text; and then apply an ordered list of transformational rules to correct the tags of individual tokens. These transformation rules are specified by the ``TagRule`` interface. Brill taggers can be created directly, from an initial tagger and a list of transformational rules; but more often, Brill taggers are created by learning rules from a training corpus, using one of the TaggerTrainers available. :param initial_tagger: The initial tagger :type initial_tagger: TaggerI :param rules: An ordered list of transformation rules that should be used to correct the initial tagging. :type rules: list(TagRule) :param training_stats: A dictionary of statistics collected during training, for possible later use :type training_stats: dict Return the ordered list of transformation rules that this tagger has learnt :return: the ordered list of transformation rules that correct the initial tagging :rtype: list of Rules Return a named statistic collected during training, or a dictionary of all available statistics if no name given :param statistic: name of statistic :type statistic: str :return: some statistic collected during training of this tagger :rtype: any (but usually a number) # Inherit documentation from TaggerI # Run the initial tagger. # Create a dictionary that maps each tag to a list of the # indices of tokens that have that tag. # Apply each rule, in order. Only try to apply rules at # positions that have the desired original tag. # Find the positions where it might apply # Apply the rule at those positions. # Update tag_to_positions with the positions of tags that # were modified. Print a list of all templates, ranked according to efficiency. If test_stats is available, the templates are ranked according to their relative contribution (summed for all rules created from a given template, weighted by score) to the performance on the test set. If no test_stats, then statistics collected during training are used instead. 
There is also an unweighted measure (just counting the rules). This is less informative, though, as many low-score rules will appear towards end of training. :param test_stats: dictionary of statistics collected during testing :type test_stats: dict of str -> any (but usually numbers) :param printunused: if True, print a list of all unused templates :type printunused: bool :return: None :rtype: None # det_tplsort() is for deterministic sorting; # the otherwise convenient Counter.most_common() unfortunately # does not break ties deterministically # between python versions and will break cross-version tests #Rules | Template" #Rules | Template" Tags by applying each rule to the entire corpus (rather than all rules to a single sequence). The point is to collect statistics on the test set for individual rules. NOTE: This is inefficient (does not build any index, so will traverse the entire corpus N times for N rules) -- usually you would not care about statistics for individual rules and thus use batch_tag() instead :param sequences: lists of token sequences (sentences, in some applications) to be tagged :type sequences: list of list of strings :param gold: the gold standard :type gold: list of list of strings :returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule)) # Apply each rule to the entire corpus, in order | 3.12921 | 3 |
superglue_parsers/wsc.py | agentsolaris/xlnn | 0 | 423 | import json
import logging
import sys
import numpy as np
import torch
from task_config import SuperGLUE_LABEL_MAPPING
from snorkel.mtl.data import MultitaskDataset
sys.path.append("..") # Adds higher directory to python modules path.
logger = logging.getLogger(__name__)
TASK_NAME = "WSC"
def get_char_index(text, span_text, span_index):
tokens = text.replace("\n", " ").lower().split(" ")
span_tokens = span_text.replace("\n", " ").lower().split(" ")
# Token exact match
if tokens[span_index : span_index + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(span_text)
return st, ed
if span_index < len(tokens):
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_text.startswith(char_in_span):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
# ed = st + len(char_in_span)
ed = st + len(char_in_text)
return st, ed
# Token fuzzy match with extra chars
char_in_text = " ".join(tokens[span_index : span_index + len(span_tokens)])
char_in_span = " ".join(span_tokens)
if char_in_span.startswith(char_in_text):
st = len(" ".join(tokens[:span_index])) + 1 if span_index != 0 else 0
ed = st + len(char_in_text)
return st, ed
# Index out of range
if span_index >= len(tokens):
span_index -= 10
# Token fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx : idx + len(span_tokens)] == span_tokens:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(span_text)
return st, ed
# Token best fuzzy match with different position
for idx in range(span_index, len(tokens)):
if tokens[idx] == span_tokens[0]:
for length in range(1, len(span_tokens)):
if tokens[idx : idx + length] != span_tokens[:length]:
st = len(" ".join(tokens[:idx])) + 1 if idx != 0 else 0
ed = st + len(" ".join(span_tokens[: length - 1]))
return st, ed
return None
def parse(jsonl_path, tokenizer, max_data_samples, max_sequence_length):
logger.info(f"Loading data from {jsonl_path}.")
rows = [json.loads(row) for row in open(jsonl_path, encoding="utf-8")]
for i in range(2):
logger.info(f"Sample {i}: {rows[i]}")
# Truncate to max_data_samples
if max_data_samples:
rows = rows[:max_data_samples]
logger.info(f"Truncating to {max_data_samples} samples.")
# sentence text
sentences = []
# span1
span1s = []
# span2
span2s = []
# span1 idx
span1_idxs = []
# span2 idx
span2_idxs = []
# label
labels = []
token1_idxs = []
token2_idxs = []
xlnet_tokens = []
xlnet_token_ids = []
xlnet_token_masks = []
xlnet_token_segments = []
# Check the maximum token length
max_len = -1
for row in rows:
index = row["idx"]
text = row["text"]
span1_text = row["target"]["span1_text"]
span2_text = row["target"]["span2_text"]
span1_index = row["target"]["span1_index"]
span2_index = row["target"]["span2_index"]
label = row["label"] if "label" in row else True
span1_char_index = get_char_index(text, span1_text, span1_index)
span2_char_index = get_char_index(text, span2_text, span2_index)
        assert span1_char_index is not None, f"Check example {index} in {jsonl_path}"
        assert span2_char_index is not None, f"Check example {index} in {jsonl_path}"
# Tokenize sentences
xlnet_tokens_sub1 = tokenizer.tokenize(
text[: min(span1_char_index[0], span2_char_index[0])]
)
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
token1_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
else:
xlnet_tokens_sub2 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
token2_idx = [
len(xlnet_tokens_sub1) + 1,
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2),
]
sub3_st = (
span1_char_index[1]
if span1_char_index[0] < span2_char_index[0]
else span2_char_index[1]
)
sub3_ed = (
span1_char_index[0]
if span1_char_index[0] > span2_char_index[0]
else span2_char_index[0]
)
xlnet_tokens_sub3 = tokenizer.tokenize(text[sub3_st:sub3_ed])
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span2_char_index[0] : span2_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token2_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
else:
xlnet_tokens_sub4 = tokenizer.tokenize(
text[span1_char_index[0] : span1_char_index[1]]
)
cur_len = (
len(xlnet_tokens_sub1) + len(xlnet_tokens_sub2) + len(xlnet_tokens_sub3)
)
token1_idx = [cur_len + 1, cur_len + len(xlnet_tokens_sub4)]
if span1_char_index[0] < span2_char_index[0]:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span2_char_index[1] :])
else:
xlnet_tokens_sub5 = tokenizer.tokenize(text[span1_char_index[1] :])
tokens = (
["[CLS]"]
+ xlnet_tokens_sub1
+ xlnet_tokens_sub2
+ xlnet_tokens_sub3
+ xlnet_tokens_sub4
+ xlnet_tokens_sub5
+ ["[SEP]"]
)
if len(tokens) > max_len:
max_len = len(tokens)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
token_segments = [0] * len(token_ids)
# Generate mask where 1 for real tokens and 0 for padding tokens
token_masks = [1] * len(token_ids)
token1_idxs.append(token1_idx)
token2_idxs.append(token2_idx)
sentences.append(text)
span1s.append(span1_text)
span2s.append(span2_text)
span1_idxs.append(span1_index)
span2_idxs.append(span2_index)
labels.append(SuperGLUE_LABEL_MAPPING[TASK_NAME][label])
xlnet_tokens.append(tokens)
xlnet_token_ids.append(torch.LongTensor(token_ids))
xlnet_token_masks.append(torch.LongTensor(token_masks))
xlnet_token_segments.append(torch.LongTensor(token_segments))
token1_idxs = torch.from_numpy(np.array(token1_idxs))
token2_idxs = torch.from_numpy(np.array(token2_idxs))
labels = torch.from_numpy(np.array(labels))
logger.info(f"Max token len {max_len}")
return MultitaskDataset(
name="SuperGLUE",
X_dict={
"sentence": sentences,
"span1": span1s,
"span2": span2s,
"span1_idx": span1_idxs,
"span2_idx": span2_idxs,
"token1_idx": token1_idxs,
"token2_idx": token2_idxs,
"tokens": xlnet_tokens,
"token_ids": xlnet_token_ids,
"token_masks": xlnet_token_masks,
"token_segments": xlnet_token_segments,
},
Y_dict={"labels": labels},
)
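# Minimal sanity check for get_char_index; the sentence below is an invented example,
# real inputs come from the SuperGLUE WSC jsonl files.
if __name__ == "__main__":
    _text = "Mark told Pete many lies about himself"
    _st, _ed = get_char_index(_text, "Pete", 2)
    assert _text[_st:_ed] == "Pete"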
| en | 0.54862 | # Adds higher directory to python modules path. # Token exact match # Token fuzzy match with extra chars # ed = st + len(char_in_span) # Token fuzzy match with extra chars # Index out of range # Token fuzzy match with different position # Token best fuzzy match with different position # Truncate to max_data_samples # sentence text # span1 # span2 # span1 idx # span2 idx # label # Check the maximum token length # Tokenize sentences # Generate mask where 1 for real tokens and 0 for padding tokens | 1.948115 | 2 |
simplimental/simplimental.py | TimmyCarbone/simplimental | 2 | 424 | import re
import json
__all__ = ["Simplimental"]
class Simplimental:
def __init__(self, text="This is not a bad idea"):
self.text = text
with open('simplimental/data/afinn.json') as data_file:
self.dictionary = json.load(data_file)
		no_punctuation = re.sub(r"[^a-zA-Z ]+", " ", self.text)
		self.tokens = no_punctuation.lower().split(" ")
		for t in list(self.tokens):  # iterate over a copy so removals don't skip tokens
if len(t) < 3 and t not in ["no"]:
self.tokens.remove(t)
def negativity(self):
hits = 0
words = []
for i in range(len(self.tokens)):
word = self.tokens[i]
score = self.dictionary.get(word, 0)
if i > 0 and self.tokens[i-1] in ["no", "not"]:
word = "not_" + word
score = -score if score > 0 else 0
if score < 0:
hits -= score
words.append(word)
return {
"score": hits,
"comparative": float(hits) / len(self.tokens),
"words": words
}
def positivity(self):
hits = 0
words = []
for i in range(len(self.tokens)):
word = self.tokens[i]
score = self.dictionary.get(word, 0)
if i > 0 and self.tokens[i-1] in ["no", "not"]:
word = "not_" + word
score = -score if score < 0 else 0
if score > 0:
hits += score
words.append(word)
return {
"score": hits,
"comparative": float(hits) / len(self.tokens),
"words": words
}
def analyze(self):
negativity = self.negativity()
positivity = self.positivity()
return {
"score": positivity["score"] - negativity["score"],
"comparative": positivity["comparative"] - negativity["comparative"],
}
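# Quick usage sketch; it assumes the script is run from the repository root so that
# simplimental/data/afinn.json can be found.
if __name__ == "__main__":
    s = Simplimental("This is not a bad idea")
    print(s.analyze())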
| none | 1 | 3.45075 | 3 |
|
Python/Examples/Macros/SettingsAxesOptimization.py | archformco/RoboDK-API | 161 | 425 | # This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string.
# You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings.
# It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API.
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# For more information visit:
# https://robodk.com/doc/en/PythonAPI/robolink.html
from robolink import * # RoboDK API
# JSON tools
import json
# Start the RoboDK API
RDK = Robolink()
# Ask the user to select a robot arm (6 axis robot which can have external axes)
robot = RDK.ItemUserPick("Select a robot arm",ITEM_TYPE_ROBOT_ARM)
# Default optimization settings test template
AxesOptimSettings = {
# Optimization parameters:
"Active": 1, # Use generic axes optimization: 0=Disabled or 1=Enabled
"Algorithm": 2, # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead
"MaxIter": 650, # Max. number of iterations
"Tol": 0.0016, # Tolerance to stop iterations
# Absolute Reference joints (double):
"AbsJnt_1": 104.17,
"AbsJnt_2": 11.22,
"AbsJnt_3": 15.97,
"AbsJnt_4": -87.48,
"AbsJnt_5": -75.36,
"AbsJnt_6": 63.03,
"AbsJnt_7": 174.13,
"AbsJnt_8": 173.60,
"AbsJnt_9": 0,
# Using Absolute reference joints (0: No, 1: Yes):
"AbsOn_1": 1,
"AbsOn_2": 1,
"AbsOn_3": 1,
"AbsOn_4": 1,
"AbsOn_5": 1,
"AbsOn_6": 1,
"AbsOn_7": 1,
"AbsOn_8": 1,
"AbsOn_9": 1,
# Weight for absolute reference joints (double):
"AbsW_1": 100,
"AbsW_2": 100,
"AbsW_3": 100,
"AbsW_4": 89,
"AbsW_5": 90,
"AbsW_6": 92,
"AbsW_7": 92,
"AbsW_8": 96,
"AbsW_9": 50,
    # Use relative joint motion smoothing (0: No, 1: Yes):
"RelOn_1": 1,
"RelOn_2": 1,
"RelOn_3": 1,
"RelOn_4": 1,
"RelOn_5": 1,
"RelOn_6": 1,
"RelOn_7": 1,
"RelOn_8": 1,
"RelOn_9": 1,
# Weight for relative joint motion (double):
"RelW_1": 5,
"RelW_2": 47,
"RelW_3": 44,
"RelW_4": 43,
"RelW_5": 36,
"RelW_6": 47,
"RelW_7": 53,
"RelW_8": 59,
"RelW_9": 0,
}
# Update one value, for example, make it active:
ToUpdate = {}
ToUpdate["Active"] = 1
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
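# The same update can be wrapped in a small helper (sketch only; it keeps the
# double json.dumps encoding and the "OptimAxes" parameter used in this example):
def set_axes_optimization(robot_item, **settings):
    """Send a partial axes-optimization update and return the station's reply."""
    payload = json.dumps(json.dumps(settings))
    return robot_item.setParam("OptimAxes", payload)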
# Example to make a partial or full update
count = 1
while True:
for i in range(7):
# Partial update
ToUpdate = {}
ToUpdate["AbsJnt_" + str(i+1)] = (count+i)*4
ToUpdate["AbsOn_" + str(i+1)] = count % 2
ToUpdate["AbsW_" + str(i+1)] = (count+i)
json_str = json.dumps(json.dumps(ToUpdate))
status = robot.setParam("OptimAxes", json_str)
print(status)
# Full update
#OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4
#OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i)
#OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2
# Full update
#print(robot.setParam("OptimAxes", str(AxesOptimSettings)))
count = count + 1
# Read settings
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
# Example to read the current axes optimization settings:
while True:
json_data = robot.setParam("OptimAxes")
json_object = json.loads(json_data)
print(json.dumps(json_object, indent=4))
pause(0.2)
| en | 0.681711 | # This example shows how to read or modify the Axes Optimization settings using the RoboDK API and a JSON string. # You can select "Axes optimization" in a robot machining menu or the robot parameters to view the axes optimization settings. # It is possible to update the axes optimization settings attached to a robot or a robot machining project manually or using the API. # # More information about the RoboDK API here: # https://robodk.com/doc/en/RoboDK-API.html # For more information visit: # https://robodk.com/doc/en/PythonAPI/robolink.html # RoboDK API # JSON tools # Start the RoboDK API # Ask the user to select a robot arm (6 axis robot wich can have external axes) # Default optimization settings test template # Optimization parameters: # Use generic axes optimization: 0=Disabled or 1=Enabled # Optimization algorithm to use: 1=Nelder Mead, 2=Samples, 3=Samples+Nelder Mead # Max. number of iterations # Tolerance to stop iterations # Absolute Reference joints (double): # Using Absolute reference joints (0: No, 1: Yes): # Weight for absolute reference joints (double): # Using for relative joint motion smoothing (0: No, 1: Yes): # Weight for relative joint motion (double): # Update one value, for example, make it active: # Example to make a partial or full update # Partial update # Full update #OptimAxes_TEST["RefJoint_" + str(i+1)] = (count+i)*4 #OptimAxes_TEST["RefWeight_" + str(i+1)] = (count+i) #OptimAxes_TEST["RefOn_" + str(i+1)] = count % 2 # Full update #print(robot.setParam("OptimAxes", str(AxesOptimSettings))) # Read settings # Example to read the current axes optimization settings: | 3.043772 | 3 |
tests/test_grammar.py | Vipul97/SLR-Parser | 5 | 426 | <filename>tests/test_grammar.py
from slr_parser.grammar import Grammar
import unittest
class TestGrammar(unittest.TestCase):
def test_grammar(self):
with open('tests/test_grammar.txt') as grammar_file:
self.G = Grammar(grammar_file.read())
self.assertDictEqual(
{'E': {('E', '+', 'T'), ('T',)}, 'T': {('T', '*', 'F'), ('F',)}, 'F': {('(', 'E', ')'), ('id',)}},
self.G.grammar)
self.assertEqual('E', self.G.start)
self.assertSetEqual({'+', '*', '(', ')', 'id'}, self.G.terminals)
self.assertSetEqual({'E', 'T', 'F'}, self.G.nonterminals)
self.assertSetEqual({'+', '*', '(', ')', 'id', 'E', 'T', 'F'}, self.G.symbols)
self.grammar_str = ["""E -> E + T
e -> T
T -> T * F | F
F -> ( E )
F -> id""", """E -> E ^ + T
E -> T
T -> T * F | F
F -> ( E )
F -> id"""]
with self.assertRaises(ValueError):
Grammar(self.grammar_str[0])
with self.assertRaises(ValueError):
Grammar(self.grammar_str[1])
if __name__ == '__main__':
unittest.main()
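# Standalone usage sketch (grammar text inlined; it mirrors tests/test_grammar.txt):
def _demo_grammar():
    G = Grammar("E -> E + T\nE -> T\nT -> T * F | F\nF -> ( E )\nF -> id")
    return G.start, sorted(G.terminals), sorted(G.nonterminals)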
| en | 0.723021 | E -> E + T e -> T T -> T * F | F F -> ( E ) F -> id E -> E ^ + T E -> T T -> T * F | F F -> ( E ) F -> id | 3.190488 | 3 |
insight/migrations/0001_initial.py | leonhead/chess-insight | 0 | 427 | # Generated by Django 3.1 on 2020-09-08 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='OpeningSystem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
],
),
migrations.CreateModel(
name='Opening',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40)),
('eco', models.CharField(max_length=3)),
('moves', models.TextField()),
('opening_system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.openingsystem')),
],
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('elo_mean', models.IntegerField(default=0)),
('elo_diff', models.IntegerField(default=0)),
('result', models.CharField(max_length=40)),
('timecontrol', models.CharField(max_length=40)),
('timestamp', models.DateTimeField()),
('raw', models.TextField()),
('opening', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.opening')),
],
),
migrations.CreateModel(
name='Analyse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('turnover_move', models.IntegerField(default=0)),
('turnover_evaluation', models.IntegerField(default=0)),
('unbalance_material', models.IntegerField(default=0)),
('unbalance_officers', models.IntegerField(default=0)),
('unbalance_exchange', models.IntegerField(default=0)),
('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.game')),
],
),
]
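# For orientation only: the models.py that produces this initial migration would look
# roughly like the following (field options inferred from the operations above).
#
#     class OpeningSystem(models.Model):
#         name = models.CharField(max_length=40)
#
#     class Opening(models.Model):
#         name = models.CharField(max_length=40)
#         eco = models.CharField(max_length=3)
#         moves = models.TextField()
#         opening_system = models.ForeignKey(OpeningSystem, on_delete=models.CASCADE)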
| en | 0.812487 | # Generated by Django 3.1 on 2020-09-08 07:43 | 1.848667 | 2 |
tests/test_merge.py | jmerizia/parallel-pytorch | 0 | 428 | <filename>tests/test_merge.py
import torch
import numpy as np
from mpi4py import MPI
from parallel_pytorch.ops import tensor_merge
from parallel_pytorch.utils import abort_on_exception
@abort_on_exception
def test_1():
worker_shape = [2, 2]
world = MPI.COMM_WORLD
num_workers = np.array(worker_shape).prod()
comm = MPI.COMM_WORLD.Split(color=0 if world.Get_rank() < num_workers else 1, key=world.Get_rank())
if world.Get_rank() < num_workers:
if comm.Get_rank() == 0:
x = torch.tensor([[0, 1], [4, 5]])
elif comm.Get_rank() == 1:
x = torch.tensor([[2, 3], [6, 7]])
elif comm.Get_rank() == 2:
x = torch.tensor([[8, 9], [12, 13]])
elif comm.Get_rank() == 3:
x = torch.tensor([[10, 11], [14, 15]])
x = tensor_merge(x, comm=comm, worker_shape=worker_shape)
if comm.Get_rank() == 0:
e = torch.tensor([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15],
])
assert torch.allclose(x, e), f'{x} != {e}'
@abort_on_exception
def test_2():
x_shape = [2, 2]
worker_shape = [1, 1]
world = MPI.COMM_WORLD
num_workers = np.array(worker_shape).prod()
comm = MPI.COMM_WORLD.Split(color=0 if world.Get_rank() < num_workers else 1, key=world.Get_rank())
if world.Get_rank() < num_workers:
volume = np.array(x_shape).prod()
x = torch.arange(volume).view(x_shape)
x = tensor_merge(x, comm=comm, worker_shape=worker_shape)
e = torch.tensor([[0, 1], [2, 3]])
assert torch.allclose(x, e), f'{x} != {e}'
def run_all():
test_1()
test_2()
if __name__ == '__main__':
run_all()
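# These tests are meant to be launched under MPI so that enough ranks exist for the
# 2x2 worker grid in test_1; a typical invocation (exact flags depend on the MPI
# distribution) is:
#
#     mpirun -n 4 python tests/test_merge.py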
| none | 1 | 2.120774 | 2 |
|
day07/main.py | tebriel/aoc2021 | 0 | 429 | <filename>day07/main.py
"""Day 07"""
def process(filename):
with open(filename) as infile:
positions = [int(x) for x in infile.readline().strip().split(',')]
min_x = min(positions)
max_x = max(positions)
costs = {x: 0 for x in range(min_x, max_x + 1)}
for pos in costs.keys():
for crab in positions:
distance = abs(pos - crab)
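            # part-2 style cost: 1 + 2 + ... + distance == distance*(distance+1)//2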
costs[pos] += ((distance * distance) + distance) // 2
print(f"Day 07: {min(costs.values())}")
if __name__ == '__main__':
process('test.txt')
process('input.txt')
| none | 1 | 3.195038 | 3 |
|
src/ceres_infer/utils.py | pritchardlabatpsu/cga | 0 | 430 | <filename>src/ceres_infer/utils.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
utilities
@author: boyangzhao
"""
import pandas as pd
import re
def int2ordinal(n):
# partially based on https://stackoverflow.com/questions/9647202/ordinal-numbers-replacement
if (type(n) is int) or n.isdigit():
if type(n) is not int:
n = int(n)
return "%d%s"%(n,{1:"st",2:"nd",3:"rd"}.get(n if n<20 else n%10,"th"))
else:
return n
def getFeatGene(x, firstOnly = False):
# get gene
if pd.isnull(x):
return ''
r = re.findall('([^,\()]*)\s(\(([^,]*)\)\s)*\[([^,]*)\]',x)
if firstOnly:
return r[0][0]
else:
return [n[0] for n in r]
def getFeatSource(x, firstOnly = False):
# get the data source
if(pd.isnull(x)):
return ''
r = re.findall('[^,\()]*\s(\([^,]*\)\s)*\[([^,]*)\]',x)
if firstOnly:
return [n[1] for n in r][0]
else:
return [n[1] for n in r]
def pd_filter(df, idx):
# filters a pandas data frame, given idx
# this is a safe filter such that if one of the idx is not found, they are ignored
if idx is None:
return df
if type(idx) is not list:
idx = [idx]
idx = [n for n in idx if n in df.index]
return df.loc[idx, :]
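if __name__ == "__main__":
    # Illustrative only: the "GENE (extra) [SOURCE]" feature-string format and the
    # values below are assumed, not taken from the CERES pipeline itself.
    feat = "TP53 (17p13.1) [CERES]"
    print(getFeatGene(feat, firstOnly=True))   # TP53
    print(getFeatSource(feat, firstOnly=True)) # CERES
    print(int2ordinal(2))                      # 2nd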
| en | 0.796364 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- utilities @author: boyangzhao # partially based on https://stackoverflow.com/questions/9647202/ordinal-numbers-replacement # get gene # get the data source # filters a pandas data frame, given idx # this is a safe filter such that if one of the idx is not found, they are ignored | 2.865777 | 3 |
MAIN/Screens/Settings/category_2/__init__.py | aragubas/fogoso | 0 | 431 | <filename>MAIN/Screens/Settings/category_2/__init__.py
#!/usr/bin/python3.7
# Copyright 2020 Aragubas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# -- Imports -- #
from ENGINE import APPDATA as reg
from ENGINE import UTILS as utils
import ENGINE as tge
from Fogoso.MAIN import ClassesUtils as gameObjs
from Fogoso import MAIN as gameMain
import pygame, sys
import importlib
import time
from random import randint
OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton
OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton
OptionsScreen_NumberFormatting = gameObjs.UpDownButton
ElementsX = 0
ElementsY = 0
def Initialize():
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
OptionsScreen_DebugModeEnabled = gameObjs.UpDownButton(0,0,14)
OptionsScreen_RandomWindowTitle = gameObjs.UpDownButton(0,0,14)
OptionsScreen_NumberFormatting = gameObjs.UpDownButton(0,0,14)
def Update():
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
global ElementsX
global ElementsY
    if OptionsScreen_DebugModeEnabled.ButtonState == 2 or OptionsScreen_DebugModeEnabled.ButtonState == 1:
current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/debug_enabled", bool)
if current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/debug_enabled", "False")
if not current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/debug_enabled", "True")
    if OptionsScreen_RandomWindowTitle.ButtonState == 2 or OptionsScreen_RandomWindowTitle.ButtonState == 1:
current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/random_title", bool)
if current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/random_title", "False")
if not current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/random_title", "True")
    if OptionsScreen_NumberFormatting.ButtonState == 2 or OptionsScreen_NumberFormatting.ButtonState == 1:
current_val = gameMain.DefaultCnt.Get_RegKey("/OPTIONS/format_numbers", bool)
if current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/format_numbers", "False")
if not current_val:
gameMain.DefaultCnt.Write_RegKey("/OPTIONS/format_numbers", "True")
OptionsScreen_DebugModeEnabled.Set_X(ElementsX + 20)
OptionsScreen_RandomWindowTitle.Set_X(ElementsX + 20)
OptionsScreen_NumberFormatting.Set_X(ElementsX + 20)
OptionsScreen_DebugModeEnabled.Set_Y(ElementsY + 50)
OptionsScreen_RandomWindowTitle.Set_Y(ElementsY + 75)
OptionsScreen_NumberFormatting.Set_Y(ElementsY + 100)
def Render(DISPLAY):
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
OptionsScreen_DebugModeEnabled.Render(DISPLAY)
OptionsScreen_RandomWindowTitle.Render(DISPLAY)
OptionsScreen_NumberFormatting.Render(DISPLAY)
# -- Debug Mode -- #
gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/debug_mode") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/debug_enabled")), (240, 240, 240), ElementsX + 95, ElementsY + 52, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa"))
# -- Random Title -- #
gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/random_title") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/random_title")), (240, 240, 240), ElementsX + 95, ElementsY + 77, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa"))
# -- Number Formatting -- #
gameMain.DefaultCnt.FontRender(DISPLAY, "/PressStart2P.ttf", 14, gameMain.DefaultCnt.Get_RegKey("/strings/settings/number_formatting") + str(gameMain.DefaultCnt.Get_RegKey("/OPTIONS/format_numbers")), (240, 240, 240), ElementsX + 95, ElementsY + 102, gameMain.DefaultCnt.Get_RegKey("/OPTIONS/font_aa"))
def EventUpdate(event):
global OptionsScreen_DebugModeEnabled
global OptionsScreen_RandomWindowTitle
global OptionsScreen_NumberFormatting
OptionsScreen_DebugModeEnabled.Update(event)
OptionsScreen_RandomWindowTitle.Update(event)
    OptionsScreen_NumberFormatting.Update(event)
OptionsScreen_NumberFormatting.Update(event) | en | 0.762131 | #!/usr/bin/python3.7 # Copyright 2020 Aragubas # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # -- Imports -- # # -- Debug Mode -- # # -- Random Title -- # # -- Number Formatting -- # | 1.953504 | 2 |
tests/test_toggle.py | ConnectionMaster/robotpy-wpilib-utilities | 14 | 432 | from robotpy_ext.control.toggle import Toggle
from robotpy_ext.misc.precise_delay import NotifierDelay
class FakeJoystick:
def __init__(self):
self._pressed = [False] * 2
def getRawButton(self, num):
return self._pressed[num]
def press(self, num):
self._pressed[num] = True
def release(self, num):
self._pressed[num] = False
def test_toggle():
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 0)
toggleButton2 = Toggle(joystick, 1)
assert toggleButton.off
joystick.press(0)
assert toggleButton.on
assert toggleButton2.off
joystick.release(0)
assert toggleButton.on
joystick.press(0)
assert toggleButton.off
joystick.release(0)
assert toggleButton.off
joystick.press(1)
assert toggleButton.off
assert toggleButton2.on
def test_toggle_debounce():
# TODO: use simulated time
delay = NotifierDelay(0.5)
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 1, 0.1)
assert toggleButton.off
joystick.press(1)
assert toggleButton.on
joystick.release(1)
joystick.press(1)
joystick.release(1)
assert toggleButton.on
delay.wait()
assert toggleButton.on
joystick.press(1)
assert toggleButton.off
| from robotpy_ext.control.toggle import Toggle
from robotpy_ext.misc.precise_delay import NotifierDelay
class FakeJoystick:
def __init__(self):
self._pressed = [False] * 2
def getRawButton(self, num):
return self._pressed[num]
def press(self, num):
self._pressed[num] = True
def release(self, num):
self._pressed[num] = False
def test_toggle():
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 0)
toggleButton2 = Toggle(joystick, 1)
assert toggleButton.off
joystick.press(0)
assert toggleButton.on
assert toggleButton2.off
joystick.release(0)
assert toggleButton.on
joystick.press(0)
assert toggleButton.off
joystick.release(0)
assert toggleButton.off
joystick.press(1)
assert toggleButton.off
assert toggleButton2.on
def test_toggle_debounce():
# TODO: use simulated time
delay = NotifierDelay(0.5)
joystick = FakeJoystick()
toggleButton = Toggle(joystick, 1, 0.1)
assert toggleButton.off
joystick.press(1)
assert toggleButton.on
joystick.release(1)
joystick.press(1)
joystick.release(1)
assert toggleButton.on
delay.wait()
assert toggleButton.on
joystick.press(1)
assert toggleButton.off
| en | 0.485185 | # TODO: use simulated time | 3.010695 | 3 |
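Condensed from the tests above: Toggle wraps anything that exposes getRawButton and flips its on/off properties on each distinct press. The FakeJoystick stand-in is reused so the sketch stays self-contained; on a real robot a wpilib.Joystick would take its place.

from robotpy_ext.control.toggle import Toggle

class FakeJoystick:
    # Same minimal stand-in used by the tests above.
    def __init__(self):
        self._pressed = [False] * 2
    def getRawButton(self, num):
        return self._pressed[num]
    def press(self, num):
        self._pressed[num] = True
    def release(self, num):
        self._pressed[num] = False

joystick = FakeJoystick()
arm = Toggle(joystick, 0)  # no debounce period, matching test_toggle
joystick.press(0)
assert arm.on              # first press latches the toggle on
joystick.release(0)
joystick.press(0)
assert arm.off             # a second press latches it off again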
tests/test.py | kjanik70/tflearn | 10,882 | 433 | '''
This file contains test cases for tflearn
'''
import tensorflow.compat.v1 as tf
import tflearn
import unittest
class TestActivations(unittest.TestCase):
'''
This class contains test cases for the functions in tflearn/activations.py
'''
PLACES = 4 # Number of places to match when testing floating point values
def test_linear(self):
f = tflearn.linear
# Case 1
x = tf.placeholder(tf.float32, shape=())
self.assertEqual(f(x), x)
# Case 2
x = tf.placeholder(tf.int64, shape=())
self.assertEqual(f(x), x)
def test_tanh(self):
f = tflearn.tanh
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:0.5}),
0.4621, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-0.25}),
-0.2449, places=TestActivations.PLACES)
def test_leaky_relu(self):
f = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:1}),
1, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-1}),
-0.2, places=TestActivations.PLACES)
# Case 4
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-5}),
-1, places=TestActivations.PLACES)
def test_apply_activation(self):
lrelu_02 = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.constant(-0.25, tf.float32)
with tf.Session() as sess:
# Case 1: 'linear'
self.assertEqual(
sess.run(tflearn.activation(x, 'linear')),
-0.25)
# Case 2: 'relu'
self.assertEqual(
sess.run(tflearn.activation(x, 'relu')),
0)
# Case 3: 'leaky_relu'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'leaky_relu')),
-0.025, places=TestActivations.PLACES)
# Case 4: 'tanh'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'tanh')),
-0.2449, places=TestActivations.PLACES)
# Case 5: lrelu_02 (callable)
self.assertAlmostEqual(
sess.run(tflearn.activation(x, lrelu_02)),
-0.05, places=TestActivations.PLACES)
if __name__ == "__main__":
unittest.main() | '''
This file contains test cases for tflearn
'''
import tensorflow.compat.v1 as tf
import tflearn
import unittest
class TestActivations(unittest.TestCase):
'''
This class contains test cases for the functions in tflearn/activations.py
'''
PLACES = 4 # Number of places to match when testing floating point values
def test_linear(self):
f = tflearn.linear
# Case 1
x = tf.placeholder(tf.float32, shape=())
self.assertEqual(f(x), x)
# Case 2
x = tf.placeholder(tf.int64, shape=())
self.assertEqual(f(x), x)
def test_tanh(self):
f = tflearn.tanh
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:0.5}),
0.4621, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-0.25}),
-0.2449, places=TestActivations.PLACES)
def test_leaky_relu(self):
f = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.placeholder(tf.float32, shape=())
with tf.Session() as sess:
# Case 1
self.assertEqual(sess.run(f(x), feed_dict={x:0}), 0)
# Case 2
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:1}),
1, places=TestActivations.PLACES)
# Case 3
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-1}),
-0.2, places=TestActivations.PLACES)
# Case 4
self.assertAlmostEqual(sess.run(f(x), feed_dict={x:-5}),
-1, places=TestActivations.PLACES)
def test_apply_activation(self):
lrelu_02 = lambda x: tflearn.leaky_relu(x, alpha=0.2)
x = tf.constant(-0.25, tf.float32)
with tf.Session() as sess:
# Case 1: 'linear'
self.assertEqual(
sess.run(tflearn.activation(x, 'linear')),
-0.25)
# Case 2: 'relu'
self.assertEqual(
sess.run(tflearn.activation(x, 'relu')),
0)
# Case 3: 'leaky_relu'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'leaky_relu')),
-0.025, places=TestActivations.PLACES)
# Case 4: 'tanh'
self.assertAlmostEqual(
sess.run(tflearn.activation(x, 'tanh')),
-0.2449, places=TestActivations.PLACES)
# Case 5: lrelu_02 (callable)
self.assertAlmostEqual(
sess.run(tflearn.activation(x, lrelu_02)),
-0.05, places=TestActivations.PLACES)
if __name__ == "__main__":
unittest.main() | en | 0.515902 | This file contains test cases for tflearn This class contains test cases for the functions in tflearn/activations.py # Number of places to match when testing floating point values # Case 1 # Case 2 # Case 1 # Case 2 # Case 3 # Case 1 # Case 2 # Case 3 # Case 4 # Case 1: 'linear' # Case 2: 'relu' # Case 3: 'leaky_relu' # Case 4: 'tanh' # Case 5: lrelu_02 (callable) | 2.772127 | 3 |
infrastructure-provisioning/src/general/api/install_libs.py | roolrd/incubator-datalab | 66 | 434 | #!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import os
import sys
import subprocess
if __name__ == "__main__":
success = True
try:
subprocess.run('cd /root; fab install-libs', shell=True, check=True)
except:
success = False
reply = dict()
reply['request_id'] = os.environ['request_id']
if success:
reply['status'] = 'ok'
else:
reply['status'] = 'err'
reply['response'] = dict()
try:
with open("/root/result.json") as f:
reply['response']['result'] = json.loads(f.read())
except:
reply['response']['result'] = {"error": "Failed to open result.json"}
reply['response']['log'] = "/var/log/datalab/{0}/{0}_{1}_{2}.log".format(os.environ['conf_resource'],
os.environ['project_name'],
os.environ['request_id'])
with open("/response/{}_{}_{}.json".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id']), 'w') as response_file:
response_file.write(json.dumps(reply))
try:
subprocess.run('chmod 666 /response/*', shell=True, check=True)
except:
success = False
if not success:
sys.exit(1) | #!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import os
import sys
import subprocess
if __name__ == "__main__":
success = True
try:
subprocess.run('cd /root; fab install-libs', shell=True, check=True)
except:
success = False
reply = dict()
reply['request_id'] = os.environ['request_id']
if success:
reply['status'] = 'ok'
else:
reply['status'] = 'err'
reply['response'] = dict()
try:
with open("/root/result.json") as f:
reply['response']['result'] = json.loads(f.read())
except:
reply['response']['result'] = {"error": "Failed to open result.json"}
reply['response']['log'] = "/var/log/datalab/{0}/{0}_{1}_{2}.log".format(os.environ['conf_resource'],
os.environ['project_name'],
os.environ['request_id'])
with open("/response/{}_{}_{}.json".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id']), 'w') as response_file:
response_file.write(json.dumps(reply))
try:
subprocess.run('chmod 666 /response/*', shell=True, check=True)
except:
success = False
if not success:
sys.exit(1) | en | 0.774422 | #!/usr/bin/python3 # ***************************************************************************** # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # ****************************************************************************** | 1.844279 | 2 |
model_zoo/official/nlp/bert_thor/src/evaluation_config.py | GuoSuiming/mindspore | 55 | 435 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig
cfg = edict({
'task': 'NER',
'num_labels': 41,
'data_file': '',
'schema_file': None,
'finetune_ckpt': '',
'use_crf': False,
'clue_benchmark': False,
})
bert_net_cfg = BertConfig(
batch_size=8 if not cfg.clue_benchmark else 1,
seq_length=512,
vocab_size=30522,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
input_mask_from_dataset=True,
token_type_ids_from_dataset=True,
dtype=mstype.float32,
compute_type=mstype.float16,
)
| # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
config settings, will be used in finetune.py
"""
from easydict import EasyDict as edict
import mindspore.common.dtype as mstype
from .bert_model import BertConfig
cfg = edict({
'task': 'NER',
'num_labels': 41,
'data_file': '',
'schema_file': None,
'finetune_ckpt': '',
'use_crf': False,
'clue_benchmark': False,
})
bert_net_cfg = BertConfig(
batch_size=8 if not cfg.clue_benchmark else 1,
seq_length=512,
vocab_size=30522,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
use_relative_positions=False,
input_mask_from_dataset=True,
token_type_ids_from_dataset=True,
dtype=mstype.float32,
compute_type=mstype.float16,
)
| en | 0.793332 | # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ config settings, will be used in finetune.py | 1.627908 | 2 |
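For context, the finetune script named in the module docstring would consume these two objects by importing them. The import path and attribute reads below are assumptions (EasyDict allows attribute-style access, and this model zoo's BertConfig normally keeps its constructor arguments as attributes); they are not shown in this file.

from src.evaluation_config import cfg, bert_net_cfg

print(cfg.task, cfg.num_labels)   # 'NER', 41 via EasyDict attribute access
print(bert_net_cfg.seq_length)    # 512, assuming BertConfig stores the argument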
portal/apps/core/management/commands/sync_articleviewedby.py | Artis-Physis/utopia-cms | 8 | 436 | # -*- coding: utf-8 -*-
# utopia-cms 2020. <NAME>.
from django.core.management import BaseCommand
from django.db.utils import IntegrityError
from apps import core_articleviewedby_mdb
from core.models import ArticleViewedBy
class Command(BaseCommand):
help = "Moves article viewed by data from mongodb to Django model"
def handle(self, *args, **options):
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
while mdb_view:
try:
avb = ArticleViewedBy.objects.get(article=mdb_view['article'], user=mdb_view['user'])
avb.viewed_at = mdb_view['viewed_at']
avb.save()
except ArticleViewedBy.DoesNotExist:
try:
ArticleViewedBy.objects.create(
article_id=mdb_view['article'], user_id=mdb_view['user'], viewed_at=mdb_view['viewed_at'])
except IntegrityError:
pass
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
| # -*- coding: utf-8 -*-
# utopia-cms 2020. <NAME>.
from django.core.management import BaseCommand
from django.db.utils import IntegrityError
from apps import core_articleviewedby_mdb
from core.models import ArticleViewedBy
class Command(BaseCommand):
help = "Moves article viewed by data from mongodb to Django model"
def handle(self, *args, **options):
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
while mdb_view:
try:
avb = ArticleViewedBy.objects.get(article=mdb_view['article'], user=mdb_view['user'])
avb.viewed_at = mdb_view['viewed_at']
avb.save()
except ArticleViewedBy.DoesNotExist:
try:
ArticleViewedBy.objects.create(
article_id=mdb_view['article'], user_id=mdb_view['user'], viewed_at=mdb_view['viewed_at'])
except IntegrityError:
pass
mdb_view = core_articleviewedby_mdb.posts.find_one_and_delete({})
| en | 0.587191 | # -*- coding: utf-8 -*- # utopia-cms 2020. <NAME>. | 2.096813 | 2 |
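Because the module lives under management/commands/, Django registers the command under the module name. A quick way to exercise it from Python (for example in a test) is call_command; handle() above takes no extra arguments, so none are passed.

from django.core.management import call_command

# Drains the MongoDB collection into ArticleViewedBy rows, equivalent to
# running `python manage.py sync_articleviewedby` from the project root.
call_command('sync_articleviewedby')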
examples/minkunet.py | dendisuhubdy/MinkowskiEngine | 1 | 437 | import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.common import data_loader
from examples.resnet import ResNetBase
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = None
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_TENSOR_STRIDE = 1
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling
# initialize_coords
def __init__(self, in_channels, out_channels, D=3):
ResNetBase.__init__(self, in_channels, out_channels, D)
def network_initialization(self, in_channels, out_channels, D):
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, dimension=D)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
self.conv1p1s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
self.LAYERS[0])
self.conv2p2s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)
self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
self.LAYERS[1])
self.conv3p4s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
self.LAYERS[2])
self.conv4p8s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
self.LAYERS[3])
self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
self.LAYERS[4])
self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
self.LAYERS[5])
self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
self.LAYERS[6])
self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
self.LAYERS[7])
self.final = ME.MinkowskiConvolution(
self.PLANES[7],
out_channels,
kernel_size=1,
has_bias=True,
dimension=D)
self.relu = ME.MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# tensor_stride=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# tensor_stride=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = ME.cat((out, out_b3p8))
out = self.block5(out)
# tensor_stride=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = ME.cat((out, out_b2p4))
out = self.block6(out)
# tensor_stride=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = ME.cat((out, out_b1p2))
out = self.block7(out)
# tensor_stride=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = ME.cat((out, out_p1))
out = self.block8(out)
return self.final(out)
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
if __name__ == '__main__':
# loss and network
criterion = nn.CrossEntropyLoss()
net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
optimizer = SGD(net.parameters(), lr=1e-2)
for i in range(10):
optimizer.zero_grad()
# Get new data
coords, feat, label = data_loader(is_classification=False)
input = ME.SparseTensor(feat, coords=coords).to(device)
label = label.to(device)
# Forward
output = net(input)
# Loss
loss = criterion(output.F, label)
print('Iteration: ', i, ', Loss: ', loss.item())
# Gradient
loss.backward()
optimizer.step()
# Saving and loading a network
torch.save(net.state_dict(), 'test.pth')
net.load_state_dict(torch.load('test.pth'))
| import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.common import data_loader
from examples.resnet import ResNetBase
class MinkUNetBase(ResNetBase):
BLOCK = None
PLANES = None
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_TENSOR_STRIDE = 1
# To use the model, must call initialize_coords before forward pass.
# Once data is processed, call clear to reset the model before calling
# initialize_coords
def __init__(self, in_channels, out_channels, D=3):
ResNetBase.__init__(self, in_channels, out_channels, D)
def network_initialization(self, in_channels, out_channels, D):
# Output of the first conv concated to conv6
self.inplanes = self.INIT_DIM
self.conv0p1s1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, dimension=D)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
self.conv1p1s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
self.LAYERS[0])
self.conv2p2s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn2 = ME.MinkowskiBatchNorm(self.inplanes)
self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
self.LAYERS[1])
self.conv3p4s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn3 = ME.MinkowskiBatchNorm(self.inplanes)
self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
self.LAYERS[2])
self.conv4p8s2 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
self.bn4 = ME.MinkowskiBatchNorm(self.inplanes)
self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
self.LAYERS[3])
self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4])
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
self.LAYERS[4])
self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5])
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
self.LAYERS[5])
self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6])
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
self.LAYERS[6])
self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7])
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
self.LAYERS[7])
self.final = ME.MinkowskiConvolution(
self.PLANES[7],
out_channels,
kernel_size=1,
has_bias=True,
dimension=D)
self.relu = ME.MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
# tensor_stride=16
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
# tensor_stride=8
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = ME.cat((out, out_b3p8))
out = self.block5(out)
# tensor_stride=4
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = ME.cat((out, out_b2p4))
out = self.block6(out)
# tensor_stride=2
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = ME.cat((out, out_b1p2))
out = self.block7(out)
# tensor_stride=1
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = ME.cat((out, out_p1))
out = self.block8(out)
return self.final(out)
class MinkUNet14(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class MinkUNet18(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class MinkUNet34(MinkUNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet50(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class MinkUNet101(MinkUNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class MinkUNet14A(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet14B(MinkUNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet14C(MinkUNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class MinkUNet14D(MinkUNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet18A(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class MinkUNet18B(MinkUNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class MinkUNet18D(MinkUNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class MinkUNet34A(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class MinkUNet34B(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class MinkUNet34C(MinkUNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
if __name__ == '__main__':
# loss and network
criterion = nn.CrossEntropyLoss()
net = MinkUNet14A(in_channels=3, out_channels=5, D=2)
print(net)
# a data loader must return a tuple of coords, features, and labels.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = net.to(device)
optimizer = SGD(net.parameters(), lr=1e-2)
for i in range(10):
optimizer.zero_grad()
# Get new data
coords, feat, label = data_loader(is_classification=False)
input = ME.SparseTensor(feat, coords=coords).to(device)
label = label.to(device)
# Forward
output = net(input)
# Loss
loss = criterion(output.F, label)
print('Iteration: ', i, ', Loss: ', loss.item())
# Gradient
loss.backward()
optimizer.step()
# Saving and loading a network
torch.save(net.state_dict(), 'test.pth')
net.load_state_dict(torch.load('test.pth'))
| en | 0.680228 | # To use the model, must call initialize_coords before forward pass. # Once data is processed, call clear to reset the model before calling # initialize_coords # Output of the first conv concated to conv6 # tensor_stride=16 # tensor_stride=8 # tensor_stride=4 # tensor_stride=2 # tensor_stride=1 # loss and network # a data loader must return a tuple of coords, features, and labels. # Get new data # Forward # Loss # Gradient # Saving and loading a network | 2.718708 | 3 |
setup.py | swfrench/nginx-access-tailer | 0 | 438 | """TODO."""
from setuptools import setup
setup(
name='nginx-access-tailer',
version='0.1',
author='swfrench',
url='https://github.com/swfrench/nginx-tailer',
packages=['nginx_access_tailer',],
license='BSD three-clause license',
entry_points={
'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main'],
},
install_requires=[
'python-gflags >= 3.1.1',
'google-cloud-monitoring >= 0.25.0',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
)
| """TODO."""
from setuptools import setup
setup(
name='nginx-access-tailer',
version='0.1',
author='swfrench',
url='https://github.com/swfrench/nginx-tailer',
packages=['nginx_access_tailer',],
license='BSD three-clause license',
entry_points={
'console_scripts': ['nginx-access-tailer = nginx_access_tailer.__main__:main'],
},
install_requires=[
'python-gflags >= 3.1.1',
'google-cloud-monitoring >= 0.25.0',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
)
| none | 1 | 1.10123 | 1 |
|
tests/integration/test_cmk_describe.py | oglok/CPU-Manager-for-Kubernetes | 0 | 439 | <filename>tests/integration/test_cmk_describe.py
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import helpers
from . import integration
def test_cmk_describe_ok():
args = ["describe", "--conf-dir={}".format(helpers.conf_dir("ok"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/ok",
"pools": {
"exclusive": {
"cpuLists": {
"4,12": {
"cpus": "4,12",
"tasks": [
2000
]
},
"5,13": {
"cpus": "5,13",
"tasks": [
2001
]
},
"6,14": {
"cpus": "6,14",
"tasks": [
2002
]
},
"7,15": {
"cpus": "7,15",
"tasks": [
2003
]
}
},
"exclusive": true,
"name": "exclusive"
},
"infra": {
"cpuLists": {
"0-2,8-10": {
"cpus": "0-2,8-10",
"tasks": [
3000,
3001,
3002
]
}
},
"exclusive": false,
"name": "infra"
},
"shared": {
"cpuLists": {
"3,11": {
"cpus": "3,11",
"tasks": [
1000,
1001,
1002,
1003
]
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
def test_cmk_describe_minimal():
args = ["describe",
"--conf-dir={}".format(helpers.conf_dir("minimal"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/minimal",
"pools": {
"exclusive": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": true,
"name": "exclusive"
},
"shared": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
| <filename>tests/integration/test_cmk_describe.py
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import helpers
from . import integration
def test_cmk_describe_ok():
args = ["describe", "--conf-dir={}".format(helpers.conf_dir("ok"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/ok",
"pools": {
"exclusive": {
"cpuLists": {
"4,12": {
"cpus": "4,12",
"tasks": [
2000
]
},
"5,13": {
"cpus": "5,13",
"tasks": [
2001
]
},
"6,14": {
"cpus": "6,14",
"tasks": [
2002
]
},
"7,15": {
"cpus": "7,15",
"tasks": [
2003
]
}
},
"exclusive": true,
"name": "exclusive"
},
"infra": {
"cpuLists": {
"0-2,8-10": {
"cpus": "0-2,8-10",
"tasks": [
3000,
3001,
3002
]
}
},
"exclusive": false,
"name": "infra"
},
"shared": {
"cpuLists": {
"3,11": {
"cpus": "3,11",
"tasks": [
1000,
1001,
1002,
1003
]
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
def test_cmk_describe_minimal():
args = ["describe",
"--conf-dir={}".format(helpers.conf_dir("minimal"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/minimal",
"pools": {
"exclusive": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": true,
"name": "exclusive"
},
"shared": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
| en | 0.57071 | # Copyright (c) 2017 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. { "path": "/cmk/tests/data/config/ok", "pools": { "exclusive": { "cpuLists": { "4,12": { "cpus": "4,12", "tasks": [ 2000 ] }, "5,13": { "cpus": "5,13", "tasks": [ 2001 ] }, "6,14": { "cpus": "6,14", "tasks": [ 2002 ] }, "7,15": { "cpus": "7,15", "tasks": [ 2003 ] } }, "exclusive": true, "name": "exclusive" }, "infra": { "cpuLists": { "0-2,8-10": { "cpus": "0-2,8-10", "tasks": [ 3000, 3001, 3002 ] } }, "exclusive": false, "name": "infra" }, "shared": { "cpuLists": { "3,11": { "cpus": "3,11", "tasks": [ 1000, 1001, 1002, 1003 ] } }, "exclusive": false, "name": "shared" } } } { "path": "/cmk/tests/data/config/minimal", "pools": { "exclusive": { "cpuLists": { "0": { "cpus": "0", "tasks": [] } }, "exclusive": true, "name": "exclusive" }, "shared": { "cpuLists": { "0": { "cpus": "0", "tasks": [] } }, "exclusive": false, "name": "shared" } } } | 2.056362 | 2 |
setup.py | richardARPANET/persistent-celery-beat-scheduler | 4 | 440 | #!/usr/bin/env python
# -*- coding: utf-8 -*
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
setup(
name='persistent-celery-beat-scheduler',
version='0.1.1.dev0',
packages=find_packages('src', exclude=('tests',)),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
description=(
'Celery Beat Scheduler that stores the scheduler data in Redis.'
),
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2',
long_description='https://github.com/richardasaurus/persistent-celery-beat-scheduler',
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*
import os
from setuptools import find_packages, setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
setup(
name='persistent-celery-beat-scheduler',
version='0.1.1.dev0',
packages=find_packages('src', exclude=('tests',)),
package_dir={'': 'src'},
include_package_data=True,
zip_safe=False,
description=(
'Celery Beat Scheduler that stores the scheduler data in Redis.'
),
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2',
long_description='https://github.com/richardasaurus/persistent-celery-beat-scheduler',
install_requires=install_requires,
classifiers=[
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
],
)
| en | 0.610727 | #!/usr/bin/env python # -*- coding: utf-8 -* # allow setup.py to be run from any path | 1.412944 | 1 |
tests/test_client.py | mgobec/python-memcached | 1 | 441 | import collections
import unittest
import driver
from driver.protocol import *
_server = ('localhost', 11211)
_dead_retry = 30
_socket_timeout = 3
_max_receive_size = 4096
class MockConnection(object):
def __init__(self,
server=_server,
dead_retry=30,
socket_timeout=3):
self.server = server
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.closed = True
self.socket = None
self.send_buffer = collections.deque()
self.receive_buffer = collections.deque()
self.on_read = None
self.on_write = None
def open(self):
self.closed = False
self.socket = True
return True
def close(self):
self.closed = True
self.socket = None
def send(self, data):
if self.on_write is not None:
self.on_write()
self.send_buffer.append(data)
def read(self, size=_max_receive_size):
if self.on_read is not None:
self.on_read()
return self.receive_buffer.popleft()
class ClientTests(unittest.TestCase):
def setUp(self):
self.client = driver.Client(_server)
self.mock = MockConnection()
self.client._connection = self.mock
self.client.connect()
def test_initialize_and_connect(self):
self.assertFalse(self.mock.closed)
def test_disconnect(self):
self.client.disconnect()
self.assertTrue(self.mock.closed)
def test_set_value_without_response(self):
self.client.set('testkey', 'testvalue')
self.assertEqual(self.mock.send_buffer.pop(), b'set testkey 0 0 9 noreply\r\ntestvalue\r\n')
def test_set_value_with_stored_response(self):
self.mock.receive_buffer.append(StoreReply.STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertTrue(response)
def test_set_value_with_not_stored_response(self):
self.mock.receive_buffer.append(StoreReply.NOT_STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_exists_response(self):
self.mock.receive_buffer.append(StoreReply.EXISTS + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_error_response(self):
self.mock.receive_buffer.append(Errors.ERROR + Constants.END_LINE)
with self.assertRaises(driver.DriverUnknownException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_server_error_response(self):
self.mock.receive_buffer.append(Errors.SERVER_ERROR + b' Test server error' + Constants.END_LINE)
with self.assertRaises(driver.DriverServerException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_client_error_response(self):
self.mock.receive_buffer.append(Errors.CLIENT_ERROR + b' Test client error' + Constants.END_LINE)
with self.assertRaises(driver.DriverClientException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_exception(self):
error_message = "Test write exception"
self.mock.on_write = lambda: _raise_exception(error_message)
result = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(result)
def test_get_value_exception(self):
error_message = "Test read exception"
self.mock.on_read = lambda: _raise_exception(error_message)
result = self.client.get('testkey')
self.assertIsNone(result)
def _raise_exception(message):
raise Exception(message)
| import collections
import unittest
import driver
from driver.protocol import *
_server = ('localhost', 11211)
_dead_retry = 30
_socket_timeout = 3
_max_receive_size = 4096
class MockConnection(object):
def __init__(self,
server=_server,
dead_retry=30,
socket_timeout=3):
self.server = server
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.closed = True
self.socket = None
self.send_buffer = collections.deque()
self.receive_buffer = collections.deque()
self.on_read = None
self.on_write = None
def open(self):
self.closed = False
self.socket = True
return True
def close(self):
self.closed = True
self.socket = None
def send(self, data):
if self.on_write is not None:
self.on_write()
self.send_buffer.append(data)
def read(self, size=_max_receive_size):
if self.on_read is not None:
self.on_read()
return self.receive_buffer.popleft()
class ClientTests(unittest.TestCase):
def setUp(self):
self.client = driver.Client(_server)
self.mock = MockConnection()
self.client._connection = self.mock
self.client.connect()
def test_initialize_and_connect(self):
self.assertFalse(self.mock.closed)
def test_disconnect(self):
self.client.disconnect()
self.assertTrue(self.mock.closed)
def test_set_value_without_response(self):
self.client.set('testkey', 'testvalue')
self.assertEqual(self.mock.send_buffer.pop(), b'set testkey 0 0 9 noreply\r\ntestvalue\r\n')
def test_set_value_with_stored_response(self):
self.mock.receive_buffer.append(StoreReply.STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertTrue(response)
def test_set_value_with_not_stored_response(self):
self.mock.receive_buffer.append(StoreReply.NOT_STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_exists_response(self):
self.mock.receive_buffer.append(StoreReply.EXISTS + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_error_response(self):
self.mock.receive_buffer.append(Errors.ERROR + Constants.END_LINE)
with self.assertRaises(driver.DriverUnknownException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_server_error_response(self):
self.mock.receive_buffer.append(Errors.SERVER_ERROR + b' Test server error' + Constants.END_LINE)
with self.assertRaises(driver.DriverServerException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_client_error_response(self):
self.mock.receive_buffer.append(Errors.CLIENT_ERROR + b' Test client error' + Constants.END_LINE)
with self.assertRaises(driver.DriverClientException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_exception(self):
error_message = "Test write exception"
self.mock.on_write = lambda: _raise_exception(error_message)
result = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(result)
def test_get_value_exception(self):
error_message = "Test read exception"
self.mock.on_read = lambda: _raise_exception(error_message)
result = self.client.get('testkey')
self.assertIsNone(result)
def _raise_exception(message):
raise Exception(message)
| none | 1 | 2.722855 | 3 |
|
GREYATOM-PROJECT----DATA--WRANGLING-WITH-PANDAS/code.py | Preethinaidu14/greyatom-python-for-data-science | 0 | 442 | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
path  # assumed to be predefined by the exercise environment (location of the bank CSV)
# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop('Loan_ID',axis = 1)
print(banks)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
banks = banks.fillna(bank_mode)
#code ends here
# --------------
# Code starts here
avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount')
# code ends here
# --------------
# code starts here
loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).value_counts()
#print(loan_approved_se)
loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).value_counts()
print(loan_approved_nse)
Loan_Status = 614
percentage_se = (56/Loan_Status)*100
percentage_nse = (366/Loan_Status)*100
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply (lambda x : int(x)/12)
print(loan_term.value_counts())
big_loan = [i for i in loan_term if i >= 25]
big_loan_term = len(big_loan)
print(big_loan_term)
#[loan_term.value_counts()[i] for i in range(len(loan_terms)) if loan_term.value_counts().index[i] >= 25]
# code ends here
# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby['ApplicantIncome','Credit_History']
mean_values = loan_groupby.mean()
# code ends here
| # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
path  # assumed to be predefined by the exercise environment (location of the bank CSV)
# code starts here
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include = 'object')
print(categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop('Loan_ID',axis = 1)
print(banks)
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
banks = banks.fillna(bank_mode)
#code ends here
# --------------
# Code starts here
avg_loan_amount = banks.pivot_table(index=['Gender','Married','Self_Employed'],values = 'LoanAmount')
# code ends here
# --------------
# code starts here
loan_approved_se = ((banks['Self_Employed']=='Yes') & (banks['Loan_Status']=='Y')).value_counts()
#print(loan_approved_se)
loan_approved_nse = ((banks['Self_Employed']=='No') & (banks['Loan_Status']=='Y')).value_counts()
print(loan_approved_nse)
Loan_Status = 614
percentage_se = (56/Loan_Status)*100
percentage_nse = (366/Loan_Status)*100
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply (lambda x : int(x)/12)
print(loan_term.value_counts())
big_loan = [i for i in loan_term if i >= 25]
big_loan_term = len(big_loan)
print(big_loan_term)
#[loan_term.value_counts()[i] for i in range(len(loan_terms)) if loan_term.value_counts().index[i] >= 25]
# code ends here
# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby['ApplicantIncome','Credit_History']
mean_values = loan_groupby.mean()
# code ends here
| en | 0.503859 | # -------------- # Import packages # code starts here # code ends here # -------------- # code starts here #code ends here # -------------- # Code starts here # code ends here # -------------- # code starts here #print(loan_approved_se) # code ends here # -------------- # code starts here #[loan_term.value_counts()[i] for i in range(len(loan_terms)) if loan_term.value_counts().index[i] >= 25] # code ends here # -------------- # code starts here # code ends here | 2.924137 | 3 |
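The approval-percentage step above hard-codes the counts (56, 366) and the row total (614). A sketch of a more direct pandas version against the same banks frame derives them from boolean masks instead:

approved = banks['Loan_Status'] == 'Y'
loan_approved_se = ((banks['Self_Employed'] == 'Yes') & approved).sum()   # a count, not value_counts()
loan_approved_nse = ((banks['Self_Employed'] == 'No') & approved).sum()
percentage_se = loan_approved_se / len(banks) * 100
percentage_nse = loan_approved_nse / len(banks) * 100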
venv/Lib/site-packages/patsy/test_regressions.py | EkremBayar/bayar | 710 | 443 | # This file is part of Patsy
# Copyright (C) 2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)
from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
PatsyError, Origin)
def test_issue_11():
# Give a sensible error message for level mismatches
# (At some points we've failed to put an origin= on these errors)
env = EvalEnvironment.capture()
data = {"X" : [0,1,2,3], "Y" : [1,2,3,4]}
formula = "C(X) + Y"
new_data = {"X" : [0,0,1,2,3,3,4], "Y" : [1,2,3,4,5,6,7]}
info = dmatrix(formula, data)
try:
build_design_matrices([info.design_info], new_data)
except PatsyError as e:
assert e.origin == Origin(formula, 0, 4)
else:
assert False
| # This file is part of Patsy
# Copyright (C) 2013 <NAME> <<EMAIL>>
# See file LICENSE.txt for license information.
# Regression tests for fixed bugs (when not otherwise better covered somewhere
# else)
from patsy import (EvalEnvironment, dmatrix, build_design_matrices,
PatsyError, Origin)
def test_issue_11():
# Give a sensible error message for level mismatches
# (At some points we've failed to put an origin= on these errors)
env = EvalEnvironment.capture()
data = {"X" : [0,1,2,3], "Y" : [1,2,3,4]}
formula = "C(X) + Y"
new_data = {"X" : [0,0,1,2,3,3,4], "Y" : [1,2,3,4,5,6,7]}
info = dmatrix(formula, data)
try:
build_design_matrices([info.design_info], new_data)
except PatsyError as e:
assert e.origin == Origin(formula, 0, 4)
else:
assert False
| en | 0.77314 | # This file is part of Patsy # Copyright (C) 2013 <NAME> <<EMAIL>> # See file LICENSE.txt for license information. # Regression tests for fixed bugs (when not otherwise better covered somewhere # else) # Give a sensible error message for level mismatches # (At some points we've failed to put an origin= on these errors) | 2.149261 | 2 |
skimage/io/_plugins/pil_plugin.py | smheidrich/scikit-image | 3 | 444 | __all__ = ['imread', 'imsave']
import numpy as np
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
def imread(fname, dtype=None, img_num=None, **kwargs):
"""Load an image from file.
Parameters
----------
fname : str or file
File name or file-like-object.
dtype : numpy dtype object or string specifier
Specifies data type of array elements.
img_num : int, optional
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
Addition keyword arguments to pass through.
Notes
-----
Files are read using the Python Imaging Library.
See PIL docs [1]_ for a list of supported formats.
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
if isinstance(fname, str):
with open(fname, 'rb') as f:
im = Image.open(f)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
else:
im = Image.open(fname)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
def pil_to_ndarray(image, dtype=None, img_num=None):
"""Import a PIL Image object to an ndarray, in memory.
Parameters
----------
Refer to ``imread``.
"""
try:
# this will raise an IOError if the file is not readable
image.getdata()[0]
except IOError as e:
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
pillow_error_message = str(e)
error_message = ('Could not load "%s" \n'
'Reason: "%s"\n'
'Please see documentation at: %s'
% (image.filename, pillow_error_message, site))
raise ValueError(error_message)
frames = []
grayscale = None
i = 0
while 1:
try:
image.seek(i)
except EOFError:
break
frame = image
if img_num is not None and img_num != i:
image.getdata()[0]
i += 1
continue
if image.format == 'PNG' and image.mode == 'I' and dtype is None:
dtype = 'uint16'
if image.mode == 'P':
if grayscale is None:
grayscale = _palette_is_grayscale(image)
if grayscale:
frame = image.convert('L')
else:
if image.format == 'PNG' and 'transparency' in image.info:
frame = image.convert('RGBA')
else:
frame = image.convert('RGB')
elif image.mode == '1':
frame = image.convert('L')
elif 'A' in image.mode:
frame = image.convert('RGBA')
elif image.mode == 'CMYK':
frame = image.convert('RGB')
if image.mode.startswith('I;16'):
shape = image.size
dtype = '>u2' if image.mode.endswith('B') else '<u2'
if 'S' in image.mode:
dtype = dtype.replace('u', 'i')
frame = np.fromstring(frame.tobytes(), dtype)
frame.shape = shape[::-1]
else:
frame = np.array(frame, dtype=dtype)
frames.append(frame)
i += 1
if img_num is not None:
break
if hasattr(image, 'fp') and image.fp:
image.fp.close()
if img_num is None and len(frames) > 1:
return np.array(frames)
elif frames:
return frames[0]
elif img_num:
raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop + 1]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
"""Export an ndarray to a PIL object.
Parameters
----------
Refer to ``imsave``.
"""
if arr.ndim == 3:
arr = img_as_ubyte(arr)
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
elif format_str in ['png', 'PNG']:
mode = 'I;16'
mode_base = 'I'
if arr.dtype.kind == 'f':
arr = img_as_uint(arr)
elif arr.max() < 256 and arr.min() >= 0:
arr = arr.astype(np.uint8)
mode = mode_base = 'L'
else:
arr = img_as_uint(arr)
else:
arr = img_as_ubyte(arr)
mode = 'L'
mode_base = 'L'
try:
array_buffer = arr.tobytes()
except AttributeError:
array_buffer = arr.tostring() # Numpy < 1.9
if arr.ndim == 2:
im = Image.new(mode_base, arr.T.shape)
try:
im.frombytes(array_buffer, 'raw', mode)
except AttributeError:
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
else:
image_shape = (arr.shape[1], arr.shape[0])
try:
im = Image.frombytes(mode, image_shape, array_buffer)
except AttributeError:
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
return im
def imsave(fname, arr, format_str=None, **kwargs):
"""Save an image to disk.
Parameters
----------
fname : str or file-like object
Name of destination file.
arr : ndarray of uint8 or float
Array (image) to save. Arrays of data-type uint8 should have
values in [0, 255], whereas floating-point arrays must be
in [0, 1].
    format_str: str
        Format to save as. Defaults to PNG when a file-like object is given;
        otherwise it is derived from the extension of ``fname``.
kwargs: dict
Keyword arguments to the Pillow save function (or tifffile save
function, for Tiff files). These are format dependent. For example,
Pillow's JPEG save function supports an integer ``quality`` argument
with values in [1, 95], while TIFFFile supports a ``compress``
integer argument with values in [0, 9].
Notes
-----
    Uses the Python Imaging Library.
    See PIL docs [1]_ for a list of other supported formats.
    All images besides single-channel PNGs are converted using ``img_as_ubyte``.
    Single-channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> saved as uint8
    - Floating point and other integers -> converted with ``img_as_uint`` (16-bit)
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, str) and format_str is None:
format_str = "PNG"
# Check for png in filename
if (isinstance(fname, str)
and fname.lower().endswith(".png")):
format_str = "PNG"
arr = np.asanyarray(arr)
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % (arr.shape, ))
if arr.ndim == 3:
if arr.shape[2] not in (3, 4):
raise ValueError("Invalid number of channels in image array.")
img = ndarray_to_pil(arr, format_str=format_str)
img.save(fname, format=format_str, **kwargs)
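# Minimal round-trip sketch for the two public functions above (illustrative only;
# 'example.png' is a made-up path):
#
#   >>> import numpy as np
#   >>> arr = np.linspace(0, 1, 64 * 64).reshape(64, 64)  # float image in [0, 1]
#   >>> imsave('example.png', arr)                        # single-channel float -> 16-bit PNG
#   >>> imread('example.png').shape
#   (64, 64)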
| __all__ = ['imread', 'imsave']
import numpy as np
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
def imread(fname, dtype=None, img_num=None, **kwargs):
"""Load an image from file.
Parameters
----------
fname : str or file
File name or file-like-object.
dtype : numpy dtype object or string specifier
Specifies data type of array elements.
img_num : int, optional
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
        Additional keyword arguments to pass through.
Notes
-----
Files are read using the Python Imaging Library.
See PIL docs [1]_ for a list of supported formats.
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
if isinstance(fname, str):
with open(fname, 'rb') as f:
im = Image.open(f)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
else:
im = Image.open(fname)
return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
def pil_to_ndarray(image, dtype=None, img_num=None):
"""Import a PIL Image object to an ndarray, in memory.
Parameters
----------
Refer to ``imread``.
"""
try:
# this will raise an IOError if the file is not readable
image.getdata()[0]
except IOError as e:
site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
pillow_error_message = str(e)
error_message = ('Could not load "%s" \n'
'Reason: "%s"\n'
'Please see documentation at: %s'
% (image.filename, pillow_error_message, site))
raise ValueError(error_message)
frames = []
grayscale = None
i = 0
while 1:
try:
image.seek(i)
except EOFError:
break
frame = image
if img_num is not None and img_num != i:
image.getdata()[0]
i += 1
continue
if image.format == 'PNG' and image.mode == 'I' and dtype is None:
dtype = 'uint16'
if image.mode == 'P':
if grayscale is None:
grayscale = _palette_is_grayscale(image)
if grayscale:
frame = image.convert('L')
else:
if image.format == 'PNG' and 'transparency' in image.info:
frame = image.convert('RGBA')
else:
frame = image.convert('RGB')
elif image.mode == '1':
frame = image.convert('L')
elif 'A' in image.mode:
frame = image.convert('RGBA')
elif image.mode == 'CMYK':
frame = image.convert('RGB')
if image.mode.startswith('I;16'):
shape = image.size
dtype = '>u2' if image.mode.endswith('B') else '<u2'
if 'S' in image.mode:
dtype = dtype.replace('u', 'i')
frame = np.fromstring(frame.tobytes(), dtype)
frame.shape = shape[::-1]
else:
frame = np.array(frame, dtype=dtype)
frames.append(frame)
i += 1
if img_num is not None:
break
if hasattr(image, 'fp') and image.fp:
image.fp.close()
if img_num is None and len(frames) > 1:
return np.array(frames)
elif frames:
return frames[0]
    elif img_num is not None:
raise IndexError('Could not find image #%s' % img_num)
def _palette_is_grayscale(pil_image):
"""Return True if PIL image in palette mode is grayscale.
Parameters
----------
pil_image : PIL image
PIL Image that is in Palette mode.
Returns
-------
is_grayscale : bool
True if all colors in image palette are gray.
"""
assert pil_image.mode == 'P'
# get palette as an array with R, G, B columns
palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
# Not all palette colors are used; unused colors have junk values.
start, stop = pil_image.getextrema()
valid_palette = palette[start:stop + 1]
# Image is grayscale if channel differences (R - G and G - B)
# are all zero.
return np.allclose(np.diff(valid_palette), 0)
def ndarray_to_pil(arr, format_str=None):
"""Export an ndarray to a PIL object.
Parameters
----------
Refer to ``imsave``.
"""
if arr.ndim == 3:
arr = img_as_ubyte(arr)
mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
elif format_str in ['png', 'PNG']:
mode = 'I;16'
mode_base = 'I'
if arr.dtype.kind == 'f':
arr = img_as_uint(arr)
elif arr.max() < 256 and arr.min() >= 0:
arr = arr.astype(np.uint8)
mode = mode_base = 'L'
else:
arr = img_as_uint(arr)
else:
arr = img_as_ubyte(arr)
mode = 'L'
mode_base = 'L'
try:
array_buffer = arr.tobytes()
except AttributeError:
array_buffer = arr.tostring() # Numpy < 1.9
if arr.ndim == 2:
im = Image.new(mode_base, arr.T.shape)
try:
im.frombytes(array_buffer, 'raw', mode)
except AttributeError:
im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
else:
image_shape = (arr.shape[1], arr.shape[0])
try:
im = Image.frombytes(mode, image_shape, array_buffer)
except AttributeError:
im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
return im
def imsave(fname, arr, format_str=None, **kwargs):
"""Save an image to disk.
Parameters
----------
fname : str or file-like object
Name of destination file.
arr : ndarray of uint8 or float
Array (image) to save. Arrays of data-type uint8 should have
values in [0, 255], whereas floating-point arrays must be
in [0, 1].
    format_str: str
        Format to save as. Defaults to PNG when a file-like object is given;
        otherwise it is derived from the extension of ``fname``.
kwargs: dict
Keyword arguments to the Pillow save function (or tifffile save
function, for Tiff files). These are format dependent. For example,
Pillow's JPEG save function supports an integer ``quality`` argument
with values in [1, 95], while TIFFFile supports a ``compress``
integer argument with values in [0, 9].
Notes
-----
    Uses the Python Imaging Library.
    See PIL docs [1]_ for a list of other supported formats.
    All images besides single-channel PNGs are converted using ``img_as_ubyte``.
    Single-channel PNGs have the following behavior:
    - Integer values in [0, 255] and Boolean types -> saved as uint8
    - Floating point and other integers -> converted with ``img_as_uint`` (16-bit)
References
----------
.. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, str) and format_str is None:
format_str = "PNG"
# Check for png in filename
if (isinstance(fname, str)
and fname.lower().endswith(".png")):
format_str = "PNG"
arr = np.asanyarray(arr)
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % (arr.shape, ))
if arr.ndim == 3:
if arr.shape[2] not in (3, 4):
raise ValueError("Invalid number of channels in image array.")
img = ndarray_to_pil(arr, format_str=format_str)
img.save(fname, format=format_str, **kwargs)
| en | 0.570593 | Load an image from file. Parameters ---------- fname : str or file File name or file-like-object. dtype : numpy dtype object or string specifier Specifies data type of array elements. img_num : int, optional Specifies which image to read in a file with multiple images (zero-indexed). kwargs : keyword pairs, optional Addition keyword arguments to pass through. Notes ----- Files are read using the Python Imaging Library. See PIL docs [1]_ for a list of supported formats. References ---------- .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html Import a PIL Image object to an ndarray, in memory. Parameters ---------- Refer to ``imread``. # this will raise an IOError if the file is not readable #external-libraries" #%s' % img_num) Return True if PIL image in palette mode is grayscale. Parameters ---------- pil_image : PIL image PIL Image that is in Palette mode. Returns ------- is_grayscale : bool True if all colors in image palette are gray. # get palette as an array with R, G, B columns # Not all palette colors are used; unused colors have junk values. # Image is grayscale if channel differences (R - G and G - B) # are all zero. Export an ndarray to a PIL object. Parameters ---------- Refer to ``imsave``. # Numpy < 1.9 # PIL 1.1.7 # PIL 1.1.7 Save an image to disk. Parameters ---------- fname : str or file-like object Name of destination file. arr : ndarray of uint8 or float Array (image) to save. Arrays of data-type uint8 should have values in [0, 255], whereas floating-point arrays must be in [0, 1]. format_str: str Format to save as, this is defaulted to PNG if using a file-like object; this will be derived from the extension if fname is a string kwargs: dict Keyword arguments to the Pillow save function (or tifffile save function, for Tiff files). These are format dependent. For example, Pillow's JPEG save function supports an integer ``quality`` argument with values in [1, 95], while TIFFFile supports a ``compress`` integer argument with values in [0, 9]. Notes ----- Use the Python Imaging Library. See PIL docs [1]_ for a list of other supported formats. All images besides single channel PNGs are converted using `img_as_uint8`. Single Channel PNGs have the following behavior: - Integer values in [0, 255] and Boolean types -> img_as_uint8 - Floating point and other integers -> img_as_uint16 References ---------- .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html # default to PNG if file-like object # Check for png in filename | 3.362283 | 3 |
examples/tellurium-files/linearChain.py | ShaikAsifullah/distributed-tellurium | 1 | 445 | # -*- coding: utf-8 -*-
"""
Linear chain of reactions.
"""
from __future__ import print_function, division
import tellurium as te
model = '''
model feedback()
// Reactions:
J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h);
J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2);
J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3);
J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4);
J4: S4 -> $X1; (V4 * S4) / (KS4 + S4);
// Species initializations:
S1 = 0; S2 = 0; S3 = 0;
S4 = 0; X0 = 10; X1 = 0;
// Variable initialization:
VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5;
end'''
r = te.loada(model)
result = r.simulate(0, 40, 500)
r.plotWithLegend(result)
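# A follow-up sketch of working with the simulation output directly (assumes the
# usual tellurium/roadrunner layout, where column 0 of `result` is time and the
# remaining columns are the floating species):
#
#   time_points = result[:, 0]
#   s1_trajectory = result[:, 1]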
| # -*- coding: utf-8 -*-
"""
Linear chain of reactions.
"""
from __future__ import print_function, division
import tellurium as te
model = '''
model feedback()
// Reactions:
J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h);
J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2);
J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3);
J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4);
J4: S4 -> $X1; (V4 * S4) / (KS4 + S4);
// Species initializations:
S1 = 0; S2 = 0; S3 = 0;
S4 = 0; X0 = 10; X1 = 0;
// Variable initialization:
VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5;
end'''
r = te.loada(model)
result = r.simulate(0, 40, 500)
r.plotWithLegend(result)
| en | 0.378554 | # -*- coding: utf-8 -*- Linear chain of reactions. model feedback() // Reactions: J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h); J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2); J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3); J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4); J4: S4 -> $X1; (V4 * S4) / (KS4 + S4); // Species initializations: S1 = 0; S2 = 0; S3 = 0; S4 = 0; X0 = 10; X1 = 0; // Variable initialization: VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5; end | 2.775736 | 3 |
backend/app/schemas/__init__.py | kommurisaikumar/savings-manager-server | 0 | 446 | from .users import User, UserCreate, UserUpdate
from .transactions import Transaction, TransactionCreate, TransactionUpdate
from .accounts import Account, AccountList, AccountSingle, AccountCreate, AccountUpdate
from .categories import Category, CategoryCreate, CategoryUpdate | from .users import User, UserCreate, UserUpdate
from .transactions import Transaction, TransactionCreate, TransactionUpdate
from .accounts import Account, AccountList, AccountSingle, AccountCreate, AccountUpdate
from .categories import Category, CategoryCreate, CategoryUpdate | none | 1 | 1.011203 | 1 |
|
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py | ciskoinch8/vimrc | 463 | 447 | <filename>vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
class Foo:
pass
class Bar(Foo):
def __init__(self):
super(Bar, self).__init__() # [super-with-arguments]
class Baz(Foo):
def __init__(self):
super().__init__()
class Qux(Foo):
def __init__(self):
super(Bar, self).__init__()
class NotSuperCall(Foo):
def __init__(self):
super.test(Bar, self).__init__()
class InvalidSuperCall(Foo):
def __init__(self):
super(InvalidSuperCall.__class__, self).__init__()
def method_accepting_cls(cls, self):
# Using plain `super()` is not valid here, since there's no `__class__` cell found
# (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
# Instead, we expect to *not* see a warning about `super-with-arguments`.
# Explicitly passing `cls`, and `self` to `super()` is what's required.
super(cls, self).__init__()
| <filename>vimfiles/bundle/vim-python/submodules/pylint/tests/functional/s/super/super_with_arguments.py
class Foo:
pass
class Bar(Foo):
def __init__(self):
super(Bar, self).__init__() # [super-with-arguments]
class Baz(Foo):
def __init__(self):
super().__init__()
class Qux(Foo):
def __init__(self):
super(Bar, self).__init__()
class NotSuperCall(Foo):
def __init__(self):
super.test(Bar, self).__init__()
class InvalidSuperCall(Foo):
def __init__(self):
super(InvalidSuperCall.__class__, self).__init__()
def method_accepting_cls(cls, self):
# Using plain `super()` is not valid here, since there's no `__class__` cell found
# (Exact exception would be 'RuntimeError: super(): __class__ cell not found')
# Instead, we expect to *not* see a warning about `super-with-arguments`.
# Explicitly passing `cls`, and `self` to `super()` is what's required.
super(cls, self).__init__()
| en | 0.863626 | # [super-with-arguments] # Using plain `super()` is not valid here, since there's no `__class__` cell found # (Exact exception would be 'RuntimeError: super(): __class__ cell not found') # Instead, we expect to *not* see a warning about `super-with-arguments`. # Explicitly passing `cls`, and `self` to `super()` is what's required. | 2.657774 | 3 |
machine.py | yukti07/Dell_Hire_hack | 0 | 448 | <gh_stars>0
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from flask import flash
import numpy as np
def check(X, clf):
# print("TTTTTTTTTTTTThis is XXXXXXXXXXXX")
# print(X)
X = np.array(X)
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
labelencoder_X_5 = LabelEncoder()
X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5])
labelencoder_X_6 = LabelEncoder()
X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6])
labelencoder_X_7 = LabelEncoder()
X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7])
labelencoder_X_9 = LabelEncoder()
X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9])
labelencoder_X_12 = LabelEncoder()
X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12])
p = clf.predict(X)
t = ()
for x in p:
if x == 0:
a = 'No'
else:
a = 'Yes'
t = t+(a,)
return t
def analyze(df, clf):
feature_importances = pd.DataFrame(clf.feature_importances_, index=['Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion'],columns=['importance']).sort_values('importance',ascending=False)
feature_importances['x1'] = feature_importances.index
ax = feature_importances.plot.bar(x='x1', y='importance', rot=90)
plt.savefig('templates/graphs/raw/feature_importances.png', frameon=True)
intervals = [x for x in range(0, 22000, 2000)]
categories = ['<'+str(x) for x in range(2000, 22000, 2000)]
df1 = df
df1['Income_Categories'] = pd.cut(df.MonthlyIncome, intervals, labels=categories)
ax = sns.countplot(x="Income_Categories", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Monthly Income vs Attrition", xlabel="Income group", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/MIvsAttr.png')
intervals = [x for x in range(18,63,3)]
categories = ['<'+str(x) for x in range(21,63,3)]
df1 = df
df1['Age_Categories'] = pd.cut(df.Age, intervals, labels=categories)
ax = sns.countplot(x="Age_Categories", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Age vs Attrition", xlabel="Age group", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/AgevsAttr.png')
intervals = [x for x in range(0,32,2)]
categories = ['<'+str(x) for x in range(2,32,2)]
df1 = df
df1['Distance_from_home'] = pd.cut(df.DistanceFromHome, intervals, labels=categories)
ax = sns.countplot(x="Distance_from_home", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Distance from home vs Attrition", xlabel="Distance", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/DistanceFromHomevsAttr.png')
ax = sns.countplot(x="PercentSalaryHike", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Salary Hike Percentage vs Attrition", xlabel="Salary Hike Percentage", ylabel="Total")
plt.savefig('templates/graphs/raw/PercentSalaryHikevsAttr.png')
ax = sns.countplot(x="NumCompaniesWorked", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number Of Previously Worked Companies vs Attrition", xlabel="Number Of Previously Worked Companies", ylabel="Total")
plt.savefig('templates/graphs/raw/NPWCvsAttr.png')
intervals = [x for x in range(0,22,2)]
categories = ['<'+str(x) for x in range(2,22,2)]
df1 = df
df1['Current_Role'] = pd.cut(df.YearsInCurrentRole, intervals, labels=categories)
ax = sns.countplot(x="Current_Role", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number Of Years in Current Role vs Attrition", xlabel="Number Of Years in Current Role", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/YICRvsAttr.png')
ax = sns.countplot(x="OverTime", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Over Time vs Attrition", xlabel="Over Time", ylabel="Total")
plt.savefig('templates/graphs/raw/OverTimevsAttr.png')
ax = sns.countplot(x="JobRole", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Job Role vs Attrition", xlabel="Job Role", ylabel="Total")
plt.xticks(rotation=70)
plt.savefig('templates/graphs/raw/JobRolevsAttr.png')
intervals = [x for x in range(0,18,2)]
categories = ['<'+str(x) for x in range(2,18,2)]
df1 = df
df1['Promotion'] = pd.cut(df.YearsSinceLastPromotion, intervals, labels=categories)
ax = sns.countplot(x="Promotion", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number of Years since Promotion vs Attrition", xlabel="Number of Years since Promotion", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/YSCPvsAttr.png')
ax = sns.countplot(x="MaritalStatus", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Marital Status vs Attrition", xlabel="Marital Status", ylabel="Total")
plt.savefig('templates/graphs/raw/MSvsAttr.png')
def run(data):
df = pd.read_csv('original_dataset.csv')
skills = df['Skills'].tolist()
# print("SKKKKKKKKKKKKKKKILLLLLLLLLLLLLLLS")
# print(skills)
df = df.drop(['DailyRate', 'EmployeeCount', 'YearsAtCompany', 'TotalWorkingYears', 'JobLevel', 'HourlyRate', 'MonthlyRate', 'Over18', 'StandardHours', 'EnvironmentSatisfaction', 'JobInvolvement', 'PerformanceRating', 'TrainingTimesLastYear', 'RelationshipSatisfaction', 'StockOptionLevel', 'WorkLifeBalance', 'YearsWithCurrManager'], axis=1)
df = df[['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']]
#print("These re SKILSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")
#print(skills)
X = df.iloc[:, 1:].values
y = df.iloc[:, 0].values
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
labelencoder_X_5 = LabelEncoder()
X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5])
labelencoder_X_6 = LabelEncoder()
X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6])
labelencoder_X_7 = LabelEncoder()
X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7])
labelencoder_X_9 = LabelEncoder()
X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9])
labelencoder_X_12 = LabelEncoder()
X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12])
X = X.astype(float)
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40,random_state=0)
clf = RandomForestClassifier(n_estimators=200)
clf.fit(X_train,y_train)
p = clf.predict(X_test)
acc = accuracy_score(y_test,p)*100
flash(acc)
X = [list(elem) for elem in data]
[r.pop(0) for r in X]
#print("####### THIS IS XXXX##########")
#print(X)
att = check(X, clf)
skills = skills[:(len(att)):]
print("LLLLLLLLLLLLLLLENGHT" + str(len(att)) +" " + str(len(skills)))
i = 0
for row in att:
X[i].insert(0, row)
i = i+1
df1 = pd.DataFrame(X)
df1.columns=['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']
analyze(df, clf)
df1.to_csv('dataset1.csv')
return att, skills
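# Illustrative helper only (not called above): the repeated LabelEncoder blocks in
# check() and run() transform the same categorical columns, which can be expressed
# as a single loop over the column indices used there.
def encode_categorical_columns(X, columns=(1, 2, 5, 6, 7, 9, 12)):
    X = np.array(X)
    for c in columns:
        X[:, c] = LabelEncoder().fit_transform(X[:, c])
    return X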
| import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from flask import flash
import numpy as np
def check(X, clf):
# print("TTTTTTTTTTTTThis is XXXXXXXXXXXX")
# print(X)
X = np.array(X)
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
labelencoder_X_5 = LabelEncoder()
X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5])
labelencoder_X_6 = LabelEncoder()
X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6])
labelencoder_X_7 = LabelEncoder()
X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7])
labelencoder_X_9 = LabelEncoder()
X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9])
labelencoder_X_12 = LabelEncoder()
X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12])
p = clf.predict(X)
t = ()
for x in p:
if x == 0:
a = 'No'
else:
a = 'Yes'
t = t+(a,)
return t
def analyze(df, clf):
feature_importances = pd.DataFrame(clf.feature_importances_, index=['Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion'],columns=['importance']).sort_values('importance',ascending=False)
feature_importances['x1'] = feature_importances.index
ax = feature_importances.plot.bar(x='x1', y='importance', rot=90)
plt.savefig('templates/graphs/raw/feature_importances.png', frameon=True)
intervals = [x for x in range(0, 22000, 2000)]
categories = ['<'+str(x) for x in range(2000, 22000, 2000)]
df1 = df
df1['Income_Categories'] = pd.cut(df.MonthlyIncome, intervals, labels=categories)
ax = sns.countplot(x="Income_Categories", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Monthly Income vs Attrition", xlabel="Income group", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/MIvsAttr.png')
intervals = [x for x in range(18,63,3)]
categories = ['<'+str(x) for x in range(21,63,3)]
df1 = df
df1['Age_Categories'] = pd.cut(df.Age, intervals, labels=categories)
ax = sns.countplot(x="Age_Categories", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Age vs Attrition", xlabel="Age group", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/AgevsAttr.png')
intervals = [x for x in range(0,32,2)]
categories = ['<'+str(x) for x in range(2,32,2)]
df1 = df
df1['Distance_from_home'] = pd.cut(df.DistanceFromHome, intervals, labels=categories)
ax = sns.countplot(x="Distance_from_home", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Distance from home vs Attrition", xlabel="Distance", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/DistanceFromHomevsAttr.png')
ax = sns.countplot(x="PercentSalaryHike", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Salary Hike Percentage vs Attrition", xlabel="Salary Hike Percentage", ylabel="Total")
plt.savefig('templates/graphs/raw/PercentSalaryHikevsAttr.png')
ax = sns.countplot(x="NumCompaniesWorked", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number Of Previously Worked Companies vs Attrition", xlabel="Number Of Previously Worked Companies", ylabel="Total")
plt.savefig('templates/graphs/raw/NPWCvsAttr.png')
intervals = [x for x in range(0,22,2)]
categories = ['<'+str(x) for x in range(2,22,2)]
df1 = df
df1['Current_Role'] = pd.cut(df.YearsInCurrentRole, intervals, labels=categories)
ax = sns.countplot(x="Current_Role", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number Of Years in Current Role vs Attrition", xlabel="Number Of Years in Current Role", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/YICRvsAttr.png')
ax = sns.countplot(x="OverTime", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Over Time vs Attrition", xlabel="Over Time", ylabel="Total")
plt.savefig('templates/graphs/raw/OverTimevsAttr.png')
ax = sns.countplot(x="JobRole", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Job Role vs Attrition", xlabel="Job Role", ylabel="Total")
plt.xticks(rotation=70)
plt.savefig('templates/graphs/raw/JobRolevsAttr.png')
intervals = [x for x in range(0,18,2)]
categories = ['<'+str(x) for x in range(2,18,2)]
df1 = df
df1['Promotion'] = pd.cut(df.YearsSinceLastPromotion, intervals, labels=categories)
ax = sns.countplot(x="Promotion", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Number of Years since Promotion vs Attrition", xlabel="Number of Years since Promotion", ylabel="Total")
plt.xticks(rotation=-30)
plt.savefig('templates/graphs/raw/YSCPvsAttr.png')
ax = sns.countplot(x="MaritalStatus", hue="Attrition", palette="Set1", data=df1)
ax.set(title="Marital Status vs Attrition", xlabel="Marital Status", ylabel="Total")
plt.savefig('templates/graphs/raw/MSvsAttr.png')
def run(data):
df = pd.read_csv('original_dataset.csv')
skills = df['Skills'].tolist()
# print("SKKKKKKKKKKKKKKKILLLLLLLLLLLLLLLS")
# print(skills)
df = df.drop(['DailyRate', 'EmployeeCount', 'YearsAtCompany', 'TotalWorkingYears', 'JobLevel', 'HourlyRate', 'MonthlyRate', 'Over18', 'StandardHours', 'EnvironmentSatisfaction', 'JobInvolvement', 'PerformanceRating', 'TrainingTimesLastYear', 'RelationshipSatisfaction', 'StockOptionLevel', 'WorkLifeBalance', 'YearsWithCurrManager'], axis=1)
df = df[['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']]
#print("These re SKILSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS")
#print(skills)
X = df.iloc[:, 1:].values
y = df.iloc[:, 0].values
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
labelencoder_X_5 = LabelEncoder()
X[:, 5] = labelencoder_X_5.fit_transform(X[:, 5])
labelencoder_X_6 = LabelEncoder()
X[:, 6] = labelencoder_X_6.fit_transform(X[:, 6])
labelencoder_X_7 = LabelEncoder()
X[:, 7] = labelencoder_X_7.fit_transform(X[:, 7])
labelencoder_X_9 = LabelEncoder()
X[:, 9] = labelencoder_X_9.fit_transform(X[:, 9])
labelencoder_X_12 = LabelEncoder()
X[:, 12] = labelencoder_X_12.fit_transform(X[:, 12])
X = X.astype(float)
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.40,random_state=0)
clf = RandomForestClassifier(n_estimators=200)
clf.fit(X_train,y_train)
p = clf.predict(X_test)
acc = accuracy_score(y_test,p)*100
flash(acc)
X = [list(elem) for elem in data]
[r.pop(0) for r in X]
#print("####### THIS IS XXXX##########")
#print(X)
att = check(X, clf)
skills = skills[:(len(att)):]
print("LLLLLLLLLLLLLLLENGHT" + str(len(att)) +" " + str(len(skills)))
i = 0
for row in att:
X[i].insert(0, row)
i = i+1
df1 = pd.DataFrame(X)
df1.columns=['Attrition', 'Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education', 'EducationField', 'Gender', 'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike', 'YearsInCurrentRole', 'YearsSinceLastPromotion']
analyze(df, clf)
df1.to_csv('dataset1.csv')
return att, skills | en | 0.181772 | # print("TTTTTTTTTTTTThis is XXXXXXXXXXXX") # print(X) # print("SKKKKKKKKKKKKKKKILLLLLLLLLLLLLLLS") # print(skills) #print("These re SKILSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS") #print(skills) #print("####### THIS IS XXXX##########") #print(X) | 2.706262 | 3 |
TM-GCN-master/experiment_bitcoin_baseline_link_prediction.py | OsmanMalik/TM-GCN | 14 | 449 | # This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline
# The point of this script is to do link prediction
# Imports and aliases
import pickle
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt
import cProfile
import pandas as pd
import datetime
from scipy.sparse import csr_matrix
import os.path
import embedding_help_functions as ehf
import scipy.io as sio
unsq = t.unsqueeze
sq = t.squeeze
# Settings
alpha_vec = [.75, .76, .77, .78, .79, .80, .81, .82, .83, .84, .85, .86, .87, .88, .89, .90, .91, .92, .93, .94, .95]
no_layers = 1
dataset = "OTC" # OTC or Alpha
no_epochs = 1000
mat_f_name = "saved_content_bitcoin_otc.mat"
no_trials = 1
beta1 = 19
beta2 = 19
cutoff = 95
eval_type = "MAP-MRR" # "MAP-MRR" or "F1"
data_loc = "data/Bitcoin_" + dataset + "/"
S_train, S_val, S_test = 95, 20, 20
lr = 0.01
momentum = 0.9
# Load and return relevant data
A, A_labels, C_train, C_val, C_test, N = ehf.load_data(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False)
# Create features for the nodes
X_train, X_val, X_test = ehf.create_node_features(A, S_train, S_val, S_test, same_block_size=False)
# Extract edges and labels from A_labels, and augment with nonexisting edges
# edges, beta
edges = A_labels._indices()
edges_aug, labels = ehf.augment_edges(edges, N, beta1, beta2, cutoff)
# Divide adjacency matrices and labels into training, validation and testing sets
edges_train, target_train, e_train, edges_val, target_val, e_val, edges_test, target_test, e_test = ehf.split_data(edges_aug, labels, S_train, S_val, S_test, same_block_size = False)
if no_trials > 1:
ep_acc_loss_vec = []
for tr in range(no_trials):
for alpha in alpha_vec:
class_weights = t.tensor([alpha, 1.0-alpha])
save_res_fname = "results_BASELINE_layers" + str(no_layers) + "_w" + str(round(float(class_weights[0])*100)) + "_" + dataset + "_link_prediction"
# Create gcn for training
if no_layers == 2:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,6,2], nonlin2="selu")
elif no_layers == 1:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,2])
# Train
optimizer = t.optim.SGD(gcn.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss(weight=class_weights) # Takes arguments (output, target)
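        # Illustrative note (not part of the experiment): the weight vector rescales the
        # loss per target class, so samples labelled 0 are weighted by alpha and samples
        # labelled 1 by 1 - alpha; a large alpha therefore penalises mistakes on class-0
        # samples more heavily. A made-up, stand-alone check of that behaviour:
        #
        #   logits = t.tensor([[2.0, 0.5]])
        #   target = t.tensor([1])
        #   nn.CrossEntropyLoss(weight=t.tensor([0.95, 0.05]))(logits, target)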
if eval_type == "F1":
ep_acc_loss = np.zeros((no_epochs,12)) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test)
elif eval_type == "MAP-MRR":
ep_acc_loss = np.zeros((no_epochs,9)) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test)
for ep in range(no_epochs):
# Compute loss and take step
optimizer.zero_grad()
output_train = gcn()
loss_train = criterion(output_train, target_train[edges_train[0]!=0])
loss_train.backward()
optimizer.step()
# Things that don't require gradient
with t.no_grad():
if ep % 100 == 0:
# Compute stats for training data; no point in doing more often than this
guess_train = t.argmax(output_train, dim=1)
if eval_type == "F1":
precision_train, recall_train, f1_train = ehf.compute_f1(guess_train, target_train[edges_train[0]!=0])
elif eval_type == "MAP-MRR":
MAP_train, MRR_train = ehf.compute_MAP_MRR(output_train, target_train[edges_train[0]!=0], edges_train[:, edges_train[0]!=0])
# Compute stats for validation data
output_val = gcn(C_val[:-1], X_val[:-1], e_val)
guess_val = t.argmax(output_val, dim=1)
if eval_type == "F1":
precision_val, recall_val, f1_val = ehf.compute_f1(guess_val, target_val[edges_val[0]!=0])
elif eval_type == "MAP-MRR":
MAP_val, MRR_val = ehf.compute_MAP_MRR(output_val, target_val[edges_val[0]!=0], edges_val[:, edges_val[0]!=0])
loss_val = criterion(output_val, target_val[edges_val[0]!=0])
# Compute stats for test data
output_test = gcn(C_test[:-1], X_test[:-1], e_test)
guess_test = t.argmax(output_test, dim=1)
if eval_type == "F1":
precision_test, recall_test, f1_test = ehf.compute_f1(guess_test, target_test[edges_test[0]!=0])
elif eval_type == "MAP-MRR":
MAP_test, MRR_test = ehf.compute_MAP_MRR(output_test, target_test[edges_test[0]!=0], edges_test[:, edges_test[0]!=0])
loss_test = criterion(output_test, target_test[edges_test[0]!=0])
# Print
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, alpha, tr, ep)
elif eval_type == "MAP-MRR":
print("alpha/Tr/Ep %.2f/%d/%d. Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (alpha, tr, ep, MAP_train, MRR_train, loss_train))
print("alpha/Tr/Ep %.2f/%d/%d. Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (alpha, tr, ep, MAP_val, MRR_val, loss_val))
print("alpha/Tr/Ep %.2f/%d/%d. Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (alpha, tr, ep, MAP_test, MRR_test, loss_test))
# Store values with results
if eval_type == "F1":
ep_acc_loss[ep] = [precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test]
elif eval_type == "MAP-MRR":
ep_acc_loss[ep] = [MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test]
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, is_final=True)
elif eval_type == "MAP-MRR":
print("FINAL: Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (MAP_train, MRR_train, loss_train))
print("FINAL: Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (MAP_val, MRR_val, loss_val))
print("FINAL: Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (MAP_test, MRR_test, loss_test))
if no_trials == 1:
pickle.dump(ep_acc_loss, open(save_res_fname, "wb"))
print("Results saved for single trial")
else:
ep_acc_loss_vec.append(ep_acc_loss)
if no_trials > 1:
pickle.dump(ep_acc_loss_vec, open(save_res_fname + "_no_trials" + str(no_trials), "wb"))
print("Results saved for all trials") | # This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline
# The point of this script is to do link prediction
# Imports and aliases
import pickle
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt
import cProfile
import pandas as pd
import datetime
from scipy.sparse import csr_matrix
import os.path
import embedding_help_functions as ehf
import scipy.io as sio
unsq = t.unsqueeze
sq = t.squeeze
# Settings
alpha_vec = [.75, .76, .77, .78, .79, .80, .81, .82, .83, .84, .85, .86, .87, .88, .89, .90, .91, .92, .93, .94, .95]
no_layers = 1
dataset = "OTC" # OTC or Alpha
no_epochs = 1000
mat_f_name = "saved_content_bitcoin_otc.mat"
no_trials = 1
beta1 = 19
beta2 = 19
cutoff = 95
eval_type = "MAP-MRR" # "MAP-MRR" or "F1"
data_loc = "data/Bitcoin_" + dataset + "/"
S_train, S_val, S_test = 95, 20, 20
lr = 0.01
momentum = 0.9
# Load and return relevant data
A, A_labels, C_train, C_val, C_test, N = ehf.load_data(data_loc, mat_f_name, S_train, S_val, S_test, transformed=False)
# Create features for the nodes
X_train, X_val, X_test = ehf.create_node_features(A, S_train, S_val, S_test, same_block_size=False)
# Extract edges and labels from A_labels, and augment with nonexisting edges
# edges, beta
edges = A_labels._indices()
edges_aug, labels = ehf.augment_edges(edges, N, beta1, beta2, cutoff)
# Divide adjacency matrices and labels into training, validation and testing sets
edges_train, target_train, e_train, edges_val, target_val, e_val, edges_test, target_test, e_test = ehf.split_data(edges_aug, labels, S_train, S_val, S_test, same_block_size = False)
if no_trials > 1:
ep_acc_loss_vec = []
for tr in range(no_trials):
for alpha in alpha_vec:
class_weights = t.tensor([alpha, 1.0-alpha])
save_res_fname = "results_BASELINE_layers" + str(no_layers) + "_w" + str(round(float(class_weights[0])*100)) + "_" + dataset + "_link_prediction"
# Create gcn for training
if no_layers == 2:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,6,2], nonlin2="selu")
elif no_layers == 1:
gcn = ehf.EmbeddingKWGCN(C_train[:-1], X_train[:-1], e_train, [6,2])
# Train
optimizer = t.optim.SGD(gcn.parameters(), lr=lr, momentum=momentum)
criterion = nn.CrossEntropyLoss(weight=class_weights) # Takes arguments (output, target)
if eval_type == "F1":
ep_acc_loss = np.zeros((no_epochs,12)) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test)
elif eval_type == "MAP-MRR":
ep_acc_loss = np.zeros((no_epochs,9)) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test)
for ep in range(no_epochs):
# Compute loss and take step
optimizer.zero_grad()
output_train = gcn()
loss_train = criterion(output_train, target_train[edges_train[0]!=0])
loss_train.backward()
optimizer.step()
# Things that don't require gradient
with t.no_grad():
if ep % 100 == 0:
# Compute stats for training data; no point in doing more often than this
guess_train = t.argmax(output_train, dim=1)
if eval_type == "F1":
precision_train, recall_train, f1_train = ehf.compute_f1(guess_train, target_train[edges_train[0]!=0])
elif eval_type == "MAP-MRR":
MAP_train, MRR_train = ehf.compute_MAP_MRR(output_train, target_train[edges_train[0]!=0], edges_train[:, edges_train[0]!=0])
# Compute stats for validation data
output_val = gcn(C_val[:-1], X_val[:-1], e_val)
guess_val = t.argmax(output_val, dim=1)
if eval_type == "F1":
precision_val, recall_val, f1_val = ehf.compute_f1(guess_val, target_val[edges_val[0]!=0])
elif eval_type == "MAP-MRR":
MAP_val, MRR_val = ehf.compute_MAP_MRR(output_val, target_val[edges_val[0]!=0], edges_val[:, edges_val[0]!=0])
loss_val = criterion(output_val, target_val[edges_val[0]!=0])
# Compute stats for test data
output_test = gcn(C_test[:-1], X_test[:-1], e_test)
guess_test = t.argmax(output_test, dim=1)
if eval_type == "F1":
precision_test, recall_test, f1_test = ehf.compute_f1(guess_test, target_test[edges_test[0]!=0])
elif eval_type == "MAP-MRR":
MAP_test, MRR_test = ehf.compute_MAP_MRR(output_test, target_test[edges_test[0]!=0], edges_test[:, edges_test[0]!=0])
loss_test = criterion(output_test, target_test[edges_test[0]!=0])
# Print
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, alpha, tr, ep)
elif eval_type == "MAP-MRR":
print("alpha/Tr/Ep %.2f/%d/%d. Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (alpha, tr, ep, MAP_train, MRR_train, loss_train))
print("alpha/Tr/Ep %.2f/%d/%d. Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (alpha, tr, ep, MAP_val, MRR_val, loss_val))
print("alpha/Tr/Ep %.2f/%d/%d. Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (alpha, tr, ep, MAP_test, MRR_test, loss_test))
# Store values with results
if eval_type == "F1":
ep_acc_loss[ep] = [precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test]
elif eval_type == "MAP-MRR":
ep_acc_loss[ep] = [MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test]
if eval_type == "F1":
ehf.print_f1(precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test, is_final=True)
elif eval_type == "MAP-MRR":
print("FINAL: Train MAP/MRR %.16f/%.16f. Train loss %.16f." % (MAP_train, MRR_train, loss_train))
print("FINAL: Val MAP/MRR %.16f/%.16f. Val loss %.16f." % (MAP_val, MRR_val, loss_val))
print("FINAL: Test MAP/MRR %.16f/%.16f. Test loss %.16f.\n" % (MAP_test, MRR_test, loss_test))
if no_trials == 1:
pickle.dump(ep_acc_loss, open(save_res_fname, "wb"))
print("Results saved for single trial")
else:
ep_acc_loss_vec.append(ep_acc_loss)
if no_trials > 1:
pickle.dump(ep_acc_loss_vec, open(save_res_fname + "_no_trials" + str(no_trials), "wb"))
print("Results saved for all trials") | en | 0.812823 | # This version of the bitcoin experiment imports data preprocessed in Matlab, and uses the GCN baseline # The point of this script is to do link prediction # Imports and aliases # Settings # OTC or Alpha # "MAP-MRR" or "F1" # Load and return relevant data # Create features for the nodes # Extract edges and labels from A_labels, and augment with nonexisting edges # edges, beta # Divide adjacency matrices and labels into training, validation and testing sets # Create gcn for training # Train # Takes arguments (output, target) # (precision_train, recall_train, f1_train, loss_train, precision_val, recall_val, f1_val, loss_val, precision_test, recall_test, f1_test, loss_test) # (MAP_train, MRR_train, loss_train, MAP_val, MRR_val, loss_val, MAP_test, MRR_test, loss_test) # Compute loss and take step # Things that don't require gradient # Compute stats for training data; no point in doing more often than this # Compute stats for validation data # Compute stats for test data # Print # Store values with results | 2.2858 | 2 |
elm_mnist/elm_mnist.py | ahara/-blog | 0 | 450 | <reponame>ahara/-blog
import cPickle
import numpy as np
from elm import ELMClassifier
from sklearn import linear_model
def load_mnist(path='../Data/mnist.pkl'):
with open(path, 'rb') as f:
return cPickle.load(f)
def get_datasets(data):
_train_x, _train_y = data[0][0], np.array(data[0][1]).reshape(len(data[0][1]), 1)
_val_x, _val_y = data[1][0], np.array(data[1][1]).reshape(len(data[1][1]), 1)
_test_x, _test_y = data[2][0], np.array(data[2][1]).reshape(len(data[2][1]), 1)
return _train_x, _train_y, _val_x, _val_y, _test_x, _test_y
if __name__ == '__main__':
# Load data sets
train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
# Build ELM
cls = ELMClassifier(n_hidden=7000,
alpha=0.93,
activation_func='multiquadric',
regressor=linear_model.Ridge(),
random_state=21398023)
cls.fit(train_x, train_y)
# Evaluate model
print 'Validation error:', cls.score(val_x, val_y)
print 'Test error:', cls.score(test_x, test_y)
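    # Follow-up sketch (assumes ELMClassifier keeps the usual sklearn-style API, so a
    # predict() call is available alongside the score() calls above):
    #
    #   predictions = cls.predict(test_x)
    #   print 'First ten predictions:', predictions[:10]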
| import cPickle
import numpy as np
from elm import ELMClassifier
from sklearn import linear_model
def load_mnist(path='../Data/mnist.pkl'):
with open(path, 'rb') as f:
return cPickle.load(f)
def get_datasets(data):
_train_x, _train_y = data[0][0], np.array(data[0][1]).reshape(len(data[0][1]), 1)
_val_x, _val_y = data[1][0], np.array(data[1][1]).reshape(len(data[1][1]), 1)
_test_x, _test_y = data[2][0], np.array(data[2][1]).reshape(len(data[2][1]), 1)
return _train_x, _train_y, _val_x, _val_y, _test_x, _test_y
if __name__ == '__main__':
# Load data sets
train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
# Build ELM
cls = ELMClassifier(n_hidden=7000,
alpha=0.93,
activation_func='multiquadric',
regressor=linear_model.Ridge(),
random_state=21398023)
cls.fit(train_x, train_y)
# Evaluate model
print 'Validation error:', cls.score(val_x, val_y)
print 'Test error:', cls.score(test_x, test_y) | en | 0.569923 | # Load data sets # Build ELM # Evaluate model | 2.874759 | 3 |
Coding_Part/bob.py | qizhu8/CSCI6230-HW02 | 0 | 451 | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
from PKC_Classes import NetworkUser, KDC
from DES import DES
from RSA_Class import RSA
import socket
import os
import sys
import threading
import time
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
def reply_conn(conn, addr):
print('Accept new connection from user {0}'.format(addr));
#conn.settimeout(500)
# conn.send(b'Hi, This is bob. Waiting for your sess key')
buf = conn.recv(1024)
while True:
if buf:
receive_packet = bytes.decode(buf).rstrip('\x00')
reply_packet = bob.process_packet(receive_packet)
conn.send(reply_packet.encode())
buf = conn.recv(1024)
else:
time.sleep(0.5)
conn.close()
bob = NetworkUser('Alice', DES(), RSA(9973, 97), 200)
print('bob:', bob.uid)
# socket communication
kdc_host, kdc_port = 'localhost', 9999
bob_host, bob_port = 'localhost', 9200
# talk to kdc for sess key
try:
sock_with_kdc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_with_kdc.connect((kdc_host, kdc_port))
print(sock_with_kdc.recv(1024))
# send cipher_key
bob_cipher_key_packet = bob.send_cipher_key()
sock_with_kdc.send(bob_cipher_key_packet.encode())
kdc_bob_cipher_key_packet = sock_with_kdc.recv(1024).decode()
print(kdc_bob_cipher_key_packet)
bob.process_packet(kdc_bob_cipher_key_packet)
except socket.error as msg:
print(msg);
sys.exit(1)
# sock_with_kdc.shutdown(socket.SHUT_WR)
# talk to bob
try:
sock_self = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_self.bind((bob_host, bob_port))
sock_self.listen(10)
except socket.error as msg:
print(msg);
sys.exit(1)
while 1:
conn, addr = sock_self.accept()
thread = threading.Thread(target=reply_conn, args=(conn, addr))
thread.start()
# sock_self.close()
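# Sketch of what a peer process (e.g. Alice's side) would do to reach this listener
# (illustrative only; the actual packet strings come from NetworkUser.process_packet
# and are not reproduced here, so `session_packet` is a made-up placeholder):
#
#   peer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   peer.connect((bob_host, bob_port))
#   peer.send(session_packet.encode())
#   reply = peer.recv(1024).decode()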
| # -*- coding: utf-8 -*-
#!/usr/bin/env python3
from PKC_Classes import NetworkUser, KDC
from DES import DES
from RSA_Class import RSA
import socket
import os
import sys
import threading
import time
if sys.version_info[0] < 3:
raise Exception("Must be using Python 3")
def reply_conn(conn, addr):
print('Accept new connection from user {0}'.format(addr));
#conn.settimeout(500)
# conn.send(b'Hi, This is bob. Waiting for your sess key')
buf = conn.recv(1024)
while True:
if buf:
receive_packet = bytes.decode(buf).rstrip('\x00')
reply_packet = bob.process_packet(receive_packet)
conn.send(reply_packet.encode())
buf = conn.recv(1024)
else:
time.sleep(0.5)
conn.close()
bob = NetworkUser('Alice', DES(), RSA(9973, 97), 200)
print('bob:', bob.uid)
# socket communication
kdc_host, kdc_port = 'localhost', 9999
bob_host, bob_port = 'localhost', 9200
# talk to kdc for sess key
try:
sock_with_kdc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_with_kdc.connect((kdc_host, kdc_port))
print(sock_with_kdc.recv(1024))
# send cipher_key
bob_cipher_key_packet = bob.send_cipher_key()
sock_with_kdc.send(bob_cipher_key_packet.encode())
kdc_bob_cipher_key_packet = sock_with_kdc.recv(1024).decode()
print(kdc_bob_cipher_key_packet)
bob.process_packet(kdc_bob_cipher_key_packet)
except socket.error as msg:
print(msg);
sys.exit(1)
# sock_with_kdc.shutdown(socket.SHUT_WR)
# talk to bob
try:
sock_self = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_self.bind((bob_host, bob_port))
sock_self.listen(10)
except socket.error as msg:
print(msg);
sys.exit(1)
while 1:
conn, addr = sock_self.accept()
thread = threading.Thread(target=reply_conn, args=(conn, addr))
thread.start()
# sock_self.close()
| en | 0.626904 | # -*- coding: utf-8 -*- #!/usr/bin/env python3 #conn.settimeout(500) # conn.send(b'Hi, This is bob. Waiting for your sess key') # socket communication # talk to kdc for sess key # send cipher_key # sock_with_kdc.shutdown(socket.SHUT_WR) # talk to bob # sock_self.close() | 2.470222 | 2 |
proj/scripts/cluster/baselines/triplets_greyscale.py | zqma/IIC | 0 | 452 | <filename>proj/scripts/cluster/baselines/triplets_greyscale.py
from __future__ import print_function
import argparse
import itertools
import os
import pickle
import sys
from datetime import datetime
import matplotlib
import numpy as np
import torch
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import proj.archs as archs
from proj.utils.cluster.general import config_to_str, get_opt, update_lr
from proj.utils.cluster.baselines.triplets import make_triplets_data, \
triplets_eval, triplets_loss
"""
Triplets.
Makes the output distribution the same as that of the attractor, and different
from that of the repeller.
Greyscale version (no sobel).
"""
# Options ----------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--arch", type=str, required=True)
parser.add_argument("--opt", type=str, default="Adam")
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--dataset_root", type=str, required=True)
parser.add_argument("--gt_k", type=int, required=True)
parser.add_argument("--output_k", type=int, required=True)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--lr_schedule", type=int, nargs="+", default=[])
parser.add_argument("--lr_mult", type=float, default=0.1)
parser.add_argument("--num_epochs", type=int, default=1000)
parser.add_argument("--batch_sz", type=int, required=True) # num pairs
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--restart", dest="restart", default=False,
action="store_true")
parser.add_argument("--test_code", dest="test_code", default=False,
action="store_true")
parser.add_argument("--save_freq", type=int, default=10)
parser.add_argument("--kmeans_on_features", default=False,
action="store_true")
# transforms
# used for "positive" sample
parser.add_argument("--demean", dest="demean", default=False,
action="store_true")
parser.add_argument("--per_img_demean", dest="per_img_demean", default=False,
action="store_true")
parser.add_argument("--data_mean", type=float, nargs="+",
default=[0.5, 0.5, 0.5])
parser.add_argument("--data_std", type=float, nargs="+",
default=[0.5, 0.5, 0.5])
parser.add_argument("--crop_orig", dest="crop_orig", default=False,
action="store_true")
parser.add_argument("--crop_other", dest="crop_other", default=False,
action="store_true")
parser.add_argument("--tf1_crop", type=str, default="random") # type name
parser.add_argument("--tf2_crop", type=str, default="random")
parser.add_argument("--tf1_crop_sz", type=int, default=84)
parser.add_argument("--tf2_crop_szs", type=int, nargs="+",
default=[84]) # allow diff crop for imgs_tf
parser.add_argument("--tf3_crop_diff", dest="tf3_crop_diff", default=False,
action="store_true")
parser.add_argument("--tf3_crop_sz", type=int, default=0)
parser.add_argument("--input_sz", type=int, default=96)
parser.add_argument("--rot_val", type=float, default=0.)
parser.add_argument("--always_rot", dest="always_rot", default=False,
action="store_true")
parser.add_argument("--no_jitter", dest="no_jitter", default=False,
action="store_true")
parser.add_argument("--no_flip", dest="no_flip", default=False,
action="store_true")
config = parser.parse_args()
# Fixed settings and checks ----------------------------------------------------
config.in_channels = 1
if config.output_k != config.gt_k:
assert (config.output_k > config.gt_k)
assert (config.kmeans_on_features)
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
config.dataloader_batch_sz = config.batch_sz
config.num_dataloaders = 1
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
if config.restart:
given_config = config
reloaded_config_path = os.path.join(given_config.out_dir, "config.pickle")
print("Loading restarting config from: %s" % reloaded_config_path)
with open(reloaded_config_path, "rb") as config_f:
config = pickle.load(config_f)
assert (config.model_ind == given_config.model_ind)
config.restart = True
# copy over new num_epochs and lr schedule
config.num_epochs = given_config.num_epochs
config.lr_schedule = given_config.lr_schedule
if not hasattr(config, "kmeans_on_features"):
config.kmeans_on_features = False
else:
print("Config: %s" % config_to_str(config))
# Data, nets, optimisers -------------------------------------------------------
dataloader_original, dataloader_positive, dataloader_negative, \
dataloader_test = make_triplets_data(config)
train_dataloaders = [dataloader_original, dataloader_positive,
dataloader_negative]
net = archs.__dict__[config.arch](config)
if config.restart:
model_path = os.path.join(config.out_dir, "latest_net.pytorch")
taking_best = not os.path.exists(model_path)
if taking_best:
print("using best instead of latest")
model_path = os.path.join(config.out_dir, "best_net.pytorch")
net.load_state_dict(
torch.load(model_path, map_location=lambda storage, loc: storage))
net.cuda()
net = torch.nn.DataParallel(net)
net.train()
optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)
if config.restart:
opt_path = os.path.join(config.out_dir, "latest_optimiser.pytorch")
if taking_best:
opt_path = os.path.join(config.out_dir, "best_optimiser.pytorch")
optimiser.load_state_dict(torch.load(opt_path))
# Results storage --------------------------------------------------------------
if config.restart:
if not taking_best:
next_epoch = config.last_epoch + 1 # corresponds to last saved model
else:
next_epoch = np.argmax(np.array(config.epoch_acc)) + 1
print("starting from epoch %d" % next_epoch)
config.epoch_acc = config.epoch_acc[:next_epoch] # in case we overshot
config.epoch_loss = config.epoch_loss[:next_epoch]
config.masses = config.masses[:next_epoch, :]
config.per_class_acc = config.per_class_acc[:next_epoch, :]
else:
config.epoch_acc = []
config.epoch_loss = []
config.masses = None
config.per_class_acc = None
_ = triplets_eval(config, net,
dataloader_test=dataloader_test,
sobel=False)
print("Pre: time %s: \n %s" % (datetime.now(), config.epoch_acc[-1]))
sys.stdout.flush()
next_epoch = 1
fig, axarr = plt.subplots(4, sharex=False, figsize=(20, 20))
# Train ------------------------------------------------------------------------
for e_i in xrange(next_epoch, config.num_epochs):
print("Starting e_i: %d" % (e_i))
if e_i in config.lr_schedule:
optimiser = update_lr(optimiser, lr_mult=config.lr_mult)
avg_loss = 0. # over heads and head_epochs (and sub_heads)
avg_loss_count = 0
sys.stdout.flush()
iterators = (d for d in train_dataloaders)
b_i = 0
for tup in itertools.izip(*iterators):
net.module.zero_grad()
imgs_orig = tup[0][0].cuda()
imgs_pos = tup[1][0].cuda()
imgs_neg = tup[2][0].cuda()
outs_orig = net(imgs_orig)
outs_pos = net(imgs_pos)
outs_neg = net(imgs_neg)
curr_loss = triplets_loss(outs_orig, outs_pos, outs_neg)
if ((b_i % 100) == 0) or (e_i == next_epoch and b_i < 10):
print("Model ind %d epoch %d batch %d "
"loss %f time %s" % \
(config.model_ind, e_i, b_i, curr_loss.item(), datetime.now()))
sys.stdout.flush()
if not np.isfinite(float(curr_loss.item())):
print("Loss is not finite... %s:" % str(curr_loss.item()))
exit(1)
avg_loss += curr_loss.item()
avg_loss_count += 1
curr_loss.backward()
optimiser.step()
b_i += 1
if b_i == 2 and config.test_code:
break
avg_loss = float(avg_loss / avg_loss_count)
config.epoch_loss.append(avg_loss)
# Eval and storage -----------------------------------------------------------
  # run at the end of each epoch
is_best = triplets_eval(config, net,
dataloader_test=dataloader_test,
sobel=False)
print("Time %s, acc %s" % (datetime.now(), config.epoch_acc[-1]))
sys.stdout.flush()
axarr[0].clear()
axarr[0].plot(config.epoch_acc)
axarr[0].set_title("acc, top: %f" % max(config.epoch_acc))
axarr[1].clear()
axarr[1].plot(config.epoch_loss)
axarr[1].set_title("Loss")
axarr[2].clear()
for c in xrange(config.gt_k):
axarr[2].plot(config.masses[:, c])
axarr[2].set_title("masses")
axarr[3].clear()
for c in xrange(config.gt_k):
axarr[3].plot(config.per_class_acc[:, c])
axarr[3].set_title("per_class_acc")
fig.tight_layout()
fig.canvas.draw_idle()
fig.savefig(os.path.join(config.out_dir, "plots.png"))
if is_best or (e_i % config.save_freq == 0):
net.module.cpu()
if is_best:
torch.save(net.module.state_dict(),
os.path.join(config.out_dir, "best_net.pytorch"))
torch.save(optimiser.state_dict(),
os.path.join(config.out_dir, "best_optimiser.pytorch"))
if e_i % config.save_freq == 0:
torch.save(net.module.state_dict(),
os.path.join(config.out_dir, "latest_net.pytorch"))
torch.save(optimiser.state_dict(),
os.path.join(config.out_dir, "latest_optimiser.pytorch"))
config.last_epoch = e_i # for last saved version
net.module.cuda()
with open(os.path.join(config.out_dir, "config.pickle"),
'wb') as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"),
"w") as text_file:
text_file.write("%s" % config)
if config.test_code:
exit(0)
| <filename>proj/scripts/cluster/baselines/triplets_greyscale.py
from __future__ import print_function
import argparse
import itertools
import os
import pickle
import sys
from datetime import datetime
import matplotlib
import numpy as np
import torch
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import proj.archs as archs
from proj.utils.cluster.general import config_to_str, get_opt, update_lr
from proj.utils.cluster.baselines.triplets import make_triplets_data, \
triplets_eval, triplets_loss
"""
Triplets.
Makes output distribution same as that of attractor, and different to that
of repeller.
Greyscale version (no sobel).
"""
# Options ----------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--arch", type=str, required=True)
parser.add_argument("--opt", type=str, default="Adam")
parser.add_argument("--dataset", type=str, required=True)
parser.add_argument("--dataset_root", type=str, required=True)
parser.add_argument("--gt_k", type=int, required=True)
parser.add_argument("--output_k", type=int, required=True)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--lr_schedule", type=int, nargs="+", default=[])
parser.add_argument("--lr_mult", type=float, default=0.1)
parser.add_argument("--num_epochs", type=int, default=1000)
parser.add_argument("--batch_sz", type=int, required=True) # num pairs
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--restart", dest="restart", default=False,
action="store_true")
parser.add_argument("--test_code", dest="test_code", default=False,
action="store_true")
parser.add_argument("--save_freq", type=int, default=10)
parser.add_argument("--kmeans_on_features", default=False,
action="store_true")
# transforms
# used for "positive" sample
parser.add_argument("--demean", dest="demean", default=False,
action="store_true")
parser.add_argument("--per_img_demean", dest="per_img_demean", default=False,
action="store_true")
parser.add_argument("--data_mean", type=float, nargs="+",
default=[0.5, 0.5, 0.5])
parser.add_argument("--data_std", type=float, nargs="+",
default=[0.5, 0.5, 0.5])
parser.add_argument("--crop_orig", dest="crop_orig", default=False,
action="store_true")
parser.add_argument("--crop_other", dest="crop_other", default=False,
action="store_true")
parser.add_argument("--tf1_crop", type=str, default="random") # type name
parser.add_argument("--tf2_crop", type=str, default="random")
parser.add_argument("--tf1_crop_sz", type=int, default=84)
parser.add_argument("--tf2_crop_szs", type=int, nargs="+",
default=[84]) # allow diff crop for imgs_tf
parser.add_argument("--tf3_crop_diff", dest="tf3_crop_diff", default=False,
action="store_true")
parser.add_argument("--tf3_crop_sz", type=int, default=0)
parser.add_argument("--input_sz", type=int, default=96)
parser.add_argument("--rot_val", type=float, default=0.)
parser.add_argument("--always_rot", dest="always_rot", default=False,
action="store_true")
parser.add_argument("--no_jitter", dest="no_jitter", default=False,
action="store_true")
parser.add_argument("--no_flip", dest="no_flip", default=False,
action="store_true")
config = parser.parse_args()
# Fixed settings and checks ----------------------------------------------------
config.in_channels = 1
if config.output_k != config.gt_k:
assert (config.output_k > config.gt_k)
assert (config.kmeans_on_features)
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
config.dataloader_batch_sz = config.batch_sz
config.num_dataloaders = 1
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
if config.restart:
given_config = config
reloaded_config_path = os.path.join(given_config.out_dir, "config.pickle")
print("Loading restarting config from: %s" % reloaded_config_path)
with open(reloaded_config_path, "rb") as config_f:
config = pickle.load(config_f)
assert (config.model_ind == given_config.model_ind)
config.restart = True
# copy over new num_epochs and lr schedule
config.num_epochs = given_config.num_epochs
config.lr_schedule = given_config.lr_schedule
if not hasattr(config, "kmeans_on_features"):
config.kmeans_on_features = False
else:
print("Config: %s" % config_to_str(config))
# Data, nets, optimisers -------------------------------------------------------
dataloader_original, dataloader_positive, dataloader_negative, \
dataloader_test = make_triplets_data(config)
train_dataloaders = [dataloader_original, dataloader_positive,
dataloader_negative]
net = archs.__dict__[config.arch](config)
if config.restart:
model_path = os.path.join(config.out_dir, "latest_net.pytorch")
taking_best = not os.path.exists(model_path)
if taking_best:
print("using best instead of latest")
model_path = os.path.join(config.out_dir, "best_net.pytorch")
net.load_state_dict(
torch.load(model_path, map_location=lambda storage, loc: storage))
net.cuda()
net = torch.nn.DataParallel(net)
net.train()
optimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)
if config.restart:
opt_path = os.path.join(config.out_dir, "latest_optimiser.pytorch")
if taking_best:
opt_path = os.path.join(config.out_dir, "best_optimiser.pytorch")
optimiser.load_state_dict(torch.load(opt_path))
# Results storage --------------------------------------------------------------
if config.restart:
if not taking_best:
next_epoch = config.last_epoch + 1 # corresponds to last saved model
else:
next_epoch = np.argmax(np.array(config.epoch_acc)) + 1
print("starting from epoch %d" % next_epoch)
config.epoch_acc = config.epoch_acc[:next_epoch] # in case we overshot
config.epoch_loss = config.epoch_loss[:next_epoch]
config.masses = config.masses[:next_epoch, :]
config.per_class_acc = config.per_class_acc[:next_epoch, :]
else:
config.epoch_acc = []
config.epoch_loss = []
config.masses = None
config.per_class_acc = None
_ = triplets_eval(config, net,
dataloader_test=dataloader_test,
sobel=False)
print("Pre: time %s: \n %s" % (datetime.now(), config.epoch_acc[-1]))
sys.stdout.flush()
next_epoch = 1
fig, axarr = plt.subplots(4, sharex=False, figsize=(20, 20))
# Train ------------------------------------------------------------------------
for e_i in xrange(next_epoch, config.num_epochs):
print("Starting e_i: %d" % (e_i))
if e_i in config.lr_schedule:
optimiser = update_lr(optimiser, lr_mult=config.lr_mult)
avg_loss = 0. # over heads and head_epochs (and sub_heads)
avg_loss_count = 0
sys.stdout.flush()
iterators = (d for d in train_dataloaders)
b_i = 0
for tup in itertools.izip(*iterators):
net.module.zero_grad()
imgs_orig = tup[0][0].cuda()
imgs_pos = tup[1][0].cuda()
imgs_neg = tup[2][0].cuda()
outs_orig = net(imgs_orig)
outs_pos = net(imgs_pos)
outs_neg = net(imgs_neg)
curr_loss = triplets_loss(outs_orig, outs_pos, outs_neg)
if ((b_i % 100) == 0) or (e_i == next_epoch and b_i < 10):
print("Model ind %d epoch %d batch %d "
"loss %f time %s" % \
(config.model_ind, e_i, b_i, curr_loss.item(), datetime.now()))
sys.stdout.flush()
if not np.isfinite(float(curr_loss.item())):
print("Loss is not finite... %s:" % str(curr_loss.item()))
exit(1)
avg_loss += curr_loss.item()
avg_loss_count += 1
curr_loss.backward()
optimiser.step()
b_i += 1
if b_i == 2 and config.test_code:
break
avg_loss = float(avg_loss / avg_loss_count)
config.epoch_loss.append(avg_loss)
# Eval and storage -----------------------------------------------------------
  # run at the end of each epoch
is_best = triplets_eval(config, net,
dataloader_test=dataloader_test,
sobel=False)
print("Time %s, acc %s" % (datetime.now(), config.epoch_acc[-1]))
sys.stdout.flush()
axarr[0].clear()
axarr[0].plot(config.epoch_acc)
axarr[0].set_title("acc, top: %f" % max(config.epoch_acc))
axarr[1].clear()
axarr[1].plot(config.epoch_loss)
axarr[1].set_title("Loss")
axarr[2].clear()
for c in xrange(config.gt_k):
axarr[2].plot(config.masses[:, c])
axarr[2].set_title("masses")
axarr[3].clear()
for c in xrange(config.gt_k):
axarr[3].plot(config.per_class_acc[:, c])
axarr[3].set_title("per_class_acc")
fig.tight_layout()
fig.canvas.draw_idle()
fig.savefig(os.path.join(config.out_dir, "plots.png"))
if is_best or (e_i % config.save_freq == 0):
net.module.cpu()
if is_best:
torch.save(net.module.state_dict(),
os.path.join(config.out_dir, "best_net.pytorch"))
torch.save(optimiser.state_dict(),
os.path.join(config.out_dir, "best_optimiser.pytorch"))
if e_i % config.save_freq == 0:
torch.save(net.module.state_dict(),
os.path.join(config.out_dir, "latest_net.pytorch"))
torch.save(optimiser.state_dict(),
os.path.join(config.out_dir, "latest_optimiser.pytorch"))
config.last_epoch = e_i # for last saved version
net.module.cuda()
with open(os.path.join(config.out_dir, "config.pickle"),
'wb') as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"),
"w") as text_file:
text_file.write("%s" % config)
if config.test_code:
exit(0)
| en | 0.582372 | Triplets. Makes output distribution same as that of attractor, and different to that of repeller. Greyscale version (no sobel). # Options ---------------------------------------------------------------------- # num pairs # transforms # used for "positive" sample # type name # allow diff crop for imgs_tf # Fixed settings and checks ---------------------------------------------------- # copy over new num_epochs and lr schedule # Data, nets, optimisers ------------------------------------------------------- # Results storage -------------------------------------------------------------- # corresponds to last saved model # in case we overshot # Train ------------------------------------------------------------------------ # over heads and head_epochs (and sub_heads) # Eval and storage ----------------------------------------------------------- # when epoch over both heads is finished # for last saved version | 2.344658 | 2 |
migrations/versions/0084_add_job_stats.py | cds-snc/notifier-api | 41 | 453 | """empty message
Revision ID: 0084_add_job_stats
Revises: 0083_add_perm_types_and_svc_perm
Create Date: 2017-05-12 13:16:14.147368
"""
# revision identifiers, used by Alembic.
revision = "0084_add_job_stats"
down_revision = "0083_add_perm_types_and_svc_perm"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
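# Creates the per-job statistics table (email/SMS/letter counters plus
# timestamps) with a unique index on job_id; downgrade() drops both again.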
def upgrade():
op.create_table(
"job_statistics",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("job_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("emails_sent", sa.BigInteger(), nullable=False),
sa.Column("emails_delivered", sa.BigInteger(), nullable=False),
sa.Column("emails_failed", sa.BigInteger(), nullable=False),
sa.Column("sms_sent", sa.BigInteger(), nullable=False),
sa.Column("sms_delivered", sa.BigInteger(), nullable=False),
sa.Column("sms_failed", sa.BigInteger(), nullable=False),
sa.Column("letters_sent", sa.BigInteger(), nullable=False),
sa.Column("letters_failed", sa.BigInteger(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["job_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_job_statistics_job_id"), "job_statistics", ["job_id"], unique=True)
def downgrade():
op.drop_index(op.f("ix_job_statistics_job_id"), table_name="job_statistics")
op.drop_table("job_statistics")
| """empty message
Revision ID: 0084_add_job_stats
Revises: 0083_add_perm_types_and_svc_perm
Create Date: 2017-05-12 13:16:14.147368
"""
# revision identifiers, used by Alembic.
revision = "0084_add_job_stats"
down_revision = "0083_add_perm_types_and_svc_perm"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table(
"job_statistics",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("job_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("emails_sent", sa.BigInteger(), nullable=False),
sa.Column("emails_delivered", sa.BigInteger(), nullable=False),
sa.Column("emails_failed", sa.BigInteger(), nullable=False),
sa.Column("sms_sent", sa.BigInteger(), nullable=False),
sa.Column("sms_delivered", sa.BigInteger(), nullable=False),
sa.Column("sms_failed", sa.BigInteger(), nullable=False),
sa.Column("letters_sent", sa.BigInteger(), nullable=False),
sa.Column("letters_failed", sa.BigInteger(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["job_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_job_statistics_job_id"), "job_statistics", ["job_id"], unique=True)
def downgrade():
op.drop_index(op.f("ix_job_statistics_job_id"), table_name="job_statistics")
op.drop_table("job_statistics")
| en | 0.355998 | empty message Revision ID: 0084_add_job_stats Revises: 0083_add_perm_types_and_svc_perm Create Date: 2017-05-12 13:16:14.147368 # revision identifiers, used by Alembic. | 1.759295 | 2 |
addons/twofactor/tests/test_models.py | tsukaeru/RDM-osf.io | 11 | 454 | <reponame>tsukaeru/RDM-osf.io
import unittest
from future.moves.urllib.parse import urlparse, urljoin, parse_qs
import pytest
from addons.twofactor.tests.utils import _valid_code
from nose.tools import (assert_equal, assert_false, assert_is_none,
assert_is_not_none, assert_true)
from osf_tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestCallbacks(unittest.TestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.user = UserFactory()
self.user.add_addon('twofactor')
self.user_settings = self.user.get_addon('twofactor')
def test_add_to_user(self):
assert_equal(self.user_settings.totp_drift, 0)
assert_is_not_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
def test_remove_from_unconfirmed_user(self):
# drift defaults to 0. Change it so we can test it was changed back.
self.user_settings.totp_drift = 1
self.user_settings.save()
self.user.delete_addon('twofactor')
self.user_settings.reload()
assert_equal(self.user_settings.totp_drift, 0)
assert_is_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
def test_remove_from_confirmed_user(self):
# drift defaults to 0. Change it so we can test it was changed back.
self.user_settings.totp_drift = 1
self.user_settings.is_confirmed = True
self.user_settings.save()
self.user.delete_addon('twofactor')
self.user_settings.reload()
assert_equal(self.user_settings.totp_drift, 0)
assert_is_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
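# The TestUserSettingsModel tests below cover serialisation of the two-factor
# settings (base32 secret, otpauth:// URL, to_json output) and TOTP code
# verification, including how totp_drift advances when future codes are used.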
class TestUserSettingsModel(unittest.TestCase):
TOTP_SECRET = 'b8f85986068f8079aa9d'
TOTP_SECRET_B32 = 'XD4FTBQGR6AHTKU5'
def setUp(self):
super(TestUserSettingsModel, self).setUp()
self.user = UserFactory()
self.user.add_addon('twofactor')
self.user_settings = self.user.get_addon('twofactor')
self.user_settings.totp_secret = self.TOTP_SECRET
self.user_settings.save()
def tearDown(self):
super(TestUserSettingsModel, self).tearDown()
self.user.__class__.delete(self.user)
def test_b32(self):
assert_equal(self.user_settings.totp_secret_b32, self.TOTP_SECRET_B32)
def test_otpauth_url(self):
url = urlparse(self.user_settings.otpauth_url)
assert_equal(url.scheme, 'otpauth')
assert_equal(url.netloc, 'totp')
assert_equal(url.path, '/RDM:{}'.format(self.user.username))
assert_equal(
parse_qs(url.query),
{'secret': [self.TOTP_SECRET_B32]}
)
def test_json(self):
# url = 'otpauth://totp/RDM:{}?secret=' + self.TOTP_SECRET_B32
settings = self.user_settings.to_json(user=None)
assert_equal(
settings,
{
'is_enabled': True,
'addon_full_name': 'Two-factor Authentication',
'addon_short_name': 'twofactor',
'drift': 0,
'is_confirmed': False,
'nodes': [],
'secret': self.TOTP_SECRET_B32,
'has_auth': False,
}
)
def test_verify_valid_code(self):
assert_true(
self.user_settings.verify_code(_valid_code(self.TOTP_SECRET))
)
def test_verify_valid_core_drift(self):
# use a code from 30 seconds in the future
assert_true(
self.user_settings.verify_code(
_valid_code(self.TOTP_SECRET, drift=1)
)
)
# make sure drift is updated.
assert_equal(self.user_settings.totp_drift, 1)
# use a code from 60 seconds in the future
assert_true(
self.user_settings.verify_code(
_valid_code(self.TOTP_SECRET, drift=2)
)
)
# make sure drift is updated.
assert_equal(self.user_settings.totp_drift, 2)
# use the current code (which is now 2 periods away from the drift)
assert_false(
self.user_settings.verify_code(_valid_code(self.TOTP_SECRET))
)
| import unittest
from future.moves.urllib.parse import urlparse, urljoin, parse_qs
import pytest
from addons.twofactor.tests.utils import _valid_code
from nose.tools import (assert_equal, assert_false, assert_is_none,
assert_is_not_none, assert_true)
from osf_tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestCallbacks(unittest.TestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.user = UserFactory()
self.user.add_addon('twofactor')
self.user_settings = self.user.get_addon('twofactor')
def test_add_to_user(self):
assert_equal(self.user_settings.totp_drift, 0)
assert_is_not_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
def test_remove_from_unconfirmed_user(self):
# drift defaults to 0. Change it so we can test it was changed back.
self.user_settings.totp_drift = 1
self.user_settings.save()
self.user.delete_addon('twofactor')
self.user_settings.reload()
assert_equal(self.user_settings.totp_drift, 0)
assert_is_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
def test_remove_from_confirmed_user(self):
# drift defaults to 0. Change it so we can test it was changed back.
self.user_settings.totp_drift = 1
self.user_settings.is_confirmed = True
self.user_settings.save()
self.user.delete_addon('twofactor')
self.user_settings.reload()
assert_equal(self.user_settings.totp_drift, 0)
assert_is_none(self.user_settings.totp_secret)
assert_false(self.user_settings.is_confirmed)
class TestUserSettingsModel(unittest.TestCase):
TOTP_SECRET = 'b8f85986068f8079aa9d'
TOTP_SECRET_B32 = 'XD4FTBQGR6AHTKU5'
def setUp(self):
super(TestUserSettingsModel, self).setUp()
self.user = UserFactory()
self.user.add_addon('twofactor')
self.user_settings = self.user.get_addon('twofactor')
self.user_settings.totp_secret = self.TOTP_SECRET
self.user_settings.save()
def tearDown(self):
super(TestUserSettingsModel, self).tearDown()
self.user.__class__.delete(self.user)
def test_b32(self):
assert_equal(self.user_settings.totp_secret_b32, self.TOTP_SECRET_B32)
def test_otpauth_url(self):
url = urlparse(self.user_settings.otpauth_url)
assert_equal(url.scheme, 'otpauth')
assert_equal(url.netloc, 'totp')
assert_equal(url.path, '/RDM:{}'.format(self.user.username))
assert_equal(
parse_qs(url.query),
{'secret': [self.TOTP_SECRET_B32]}
)
def test_json(self):
# url = 'otpauth://totp/RDM:{}?secret=' + self.TOTP_SECRET_B32
settings = self.user_settings.to_json(user=None)
assert_equal(
settings,
{
'is_enabled': True,
'addon_full_name': 'Two-factor Authentication',
'addon_short_name': 'twofactor',
'drift': 0,
'is_confirmed': False,
'nodes': [],
'secret': self.TOTP_SECRET_B32,
'has_auth': False,
}
)
def test_verify_valid_code(self):
assert_true(
self.user_settings.verify_code(_valid_code(self.TOTP_SECRET))
)
def test_verify_valid_core_drift(self):
# use a code from 30 seconds in the future
assert_true(
self.user_settings.verify_code(
_valid_code(self.TOTP_SECRET, drift=1)
)
)
# make sure drift is updated.
assert_equal(self.user_settings.totp_drift, 1)
# use a code from 60 seconds in the future
assert_true(
self.user_settings.verify_code(
_valid_code(self.TOTP_SECRET, drift=2)
)
)
# make sure drift is updated.
assert_equal(self.user_settings.totp_drift, 2)
# use the current code (which is now 2 periods away from the drift)
assert_false(
self.user_settings.verify_code(_valid_code(self.TOTP_SECRET))
) | en | 0.913471 | # drift defaults to 0. Change it so we can test it was changed back. # drift defaults to 0. Change it so we can test it was changed back. # url = 'otpauth://totp/RDM:{}?secret=' + self.TOTP_SECRET_B32 # use a code from 30 seconds in the future # make sure drift is updated. # use a code from 60 seconds in the future # make sure drift is updated. # use the current code (which is now 2 periods away from the drift) | 2.143448 | 2 |
betterloader/standard_transforms.py | BinItAI/BetterLoader | 39 | 455 | <filename>betterloader/standard_transforms.py
import numpy as np
from torchvision import transforms
np.random.seed(1)
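# Wraps a single transform so each sampled item yields two independently
# augmented views of the same input (e.g. for two-view training pipelines).
# Hypothetical usage sketch (the chosen transform is an assumption):
# two_views = TransformWhileSampling(transforms.RandomResizedCrop(224))
# view_a, view_b = two_views(img)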
class TransformWhileSampling(object):
def __init__(self, transform):
self.transform = transform
def __call__(self, sample):
x1 = self.transform(sample)
x2 = self.transform(sample)
return x1, x2 | <filename>betterloader/standard_transforms.py
import numpy as np
from torchvision import transforms
np.random.seed(1)
class TransformWhileSampling(object):
def __init__(self, transform):
self.transform = transform
def __call__(self, sample):
x1 = self.transform(sample)
x2 = self.transform(sample)
return x1, x2 | none | 1 | 2.558488 | 3 |
|
lanedet/runner/utils/net_utils.py | ztjsw/lanedet | 1 | 456 | import torch
import os
from torch import nn
import numpy as np
import torch.nn.functional
from termcolor import colored
from .logger import get_logger
def save_model(net, optim, scheduler, recorder, is_best=False):
model_dir = os.path.join(recorder.work_dir, 'ckpt')
os.system('mkdir -p {}'.format(model_dir))
epoch = recorder.epoch
ckpt_name = 'best' if is_best else epoch
torch.save({
'net': net.state_dict(),
'optim': optim.state_dict(),
'scheduler': scheduler.state_dict(),
'recorder': recorder.state_dict(),
'epoch': epoch
}, os.path.join(model_dir, '{}.pth'.format(ckpt_name)))
# remove previous pretrained model if the number of models is too big
# pths = [int(pth.split('.')[0]) for pth in os.listdir(model_dir)]
# if len(pths) <= 2:
# return
# os.system('rm {}'.format(os.path.join(model_dir, '{}.pth'.format(min(pths)))))
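# load_network_specified() below copies only the weights whose names and shapes
# match the current network and skips (and logs) the rest; load_network() uses
# it for the finetune path.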
def load_network_specified(net, model_dir, logger=None):
pretrained_net = torch.load(model_dir)['net']
net_state = net.state_dict()
state = {}
for k, v in pretrained_net.items():
if k not in net_state.keys() or v.size() != net_state[k].size():
if logger:
logger.info('skip weights: ' + k)
continue
state[k] = v
net.load_state_dict(state, strict=False)
def load_network(net, model_dir, finetune_from=None, logger=None):
if finetune_from:
if logger:
logger.info('Finetune model from: ' + finetune_from)
load_network_specified(net, finetune_from, logger)
return
pretrained_model = torch.load(model_dir)
net.load_state_dict(pretrained_model['net'], strict=True)
| import torch
import os
from torch import nn
import numpy as np
import torch.nn.functional
from termcolor import colored
from .logger import get_logger
def save_model(net, optim, scheduler, recorder, is_best=False):
model_dir = os.path.join(recorder.work_dir, 'ckpt')
os.system('mkdir -p {}'.format(model_dir))
epoch = recorder.epoch
ckpt_name = 'best' if is_best else epoch
torch.save({
'net': net.state_dict(),
'optim': optim.state_dict(),
'scheduler': scheduler.state_dict(),
'recorder': recorder.state_dict(),
'epoch': epoch
}, os.path.join(model_dir, '{}.pth'.format(ckpt_name)))
# remove previous pretrained model if the number of models is too big
# pths = [int(pth.split('.')[0]) for pth in os.listdir(model_dir)]
# if len(pths) <= 2:
# return
# os.system('rm {}'.format(os.path.join(model_dir, '{}.pth'.format(min(pths)))))
def load_network_specified(net, model_dir, logger=None):
pretrained_net = torch.load(model_dir)['net']
net_state = net.state_dict()
state = {}
for k, v in pretrained_net.items():
if k not in net_state.keys() or v.size() != net_state[k].size():
if logger:
logger.info('skip weights: ' + k)
continue
state[k] = v
net.load_state_dict(state, strict=False)
def load_network(net, model_dir, finetune_from=None, logger=None):
if finetune_from:
if logger:
logger.info('Finetune model from: ' + finetune_from)
load_network_specified(net, finetune_from, logger)
return
pretrained_model = torch.load(model_dir)
net.load_state_dict(pretrained_model['net'], strict=True)
| en | 0.378729 | # remove previous pretrained model if the number of models is too big # pths = [int(pth.split('.')[0]) for pth in os.listdir(model_dir)] # if len(pths) <= 2: # return # os.system('rm {}'.format(os.path.join(model_dir, '{}.pth'.format(min(pths))))) | 2.208344 | 2 |
hexafuel_oil/hexafuel_oil_app/apps.py | zante95/Hexafuel-Oil | 0 | 457 | from django.apps import AppConfig #pragma: no cover
class HexafuelOilAppConfig(AppConfig): #pragma: no cover
name = 'hexafuel_oil_app'
| from django.apps import AppConfig #pragma: no cover
class HexafuelOilAppConfig(AppConfig): #pragma: no cover
name = 'hexafuel_oil_app'
| en | 0.358944 | #pragma: no cover #pragma: no cover | 1.175293 | 1 |
main.py | jonodrew/matchex | 0 | 458 | from __future__ import division
from timeit import default_timer as timer
import csv
import numpy as np
import itertools
from munkres import Munkres, print_matrix, make_cost_matrix
import sys
from classes import *
from functions import *
from math import sqrt
import Tkinter as tk
import tkFileDialog as filedialog
root = tk.Tk()
root.withdraw()
p_file = filedialog.askopenfilename(title='Please select the posting file')
c_file = filedialog.askopenfilename(title='Please select the candidate file')
"""for use with /users/java_jonathan/postings_lge.csv and
/Users/java_jonathan/candidates_lge.csv"""
# p_file = raw_input("Please enter the path for the postings file: ")
# p_file = p_file.strip()
# c_file = raw_input("Please enter the path for the candidate file: ")
# c_file = c_file.strip()
start = timer()
with open(p_file,'r') as f:
#with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f:
reader = csv.reader(f)
postingsAll = list(reader)
with open(c_file,'r') as f:
reader = csv.reader(f)
candidatesAll = list(reader)
"""create empty lists to fill with lists of lists output by iterating function
below"""
names = []
totalMatrix = []
for list in candidatesAll:
candidate = Candidate(*list)
names.append(candidate.name)
n = 0
for list in postingsAll:
posting = Posting(*list)
totalMatrix.append(matchDept(posting,candidate) + matchAnchor(posting,candidate)
+matchLocation(posting,candidate) + matchCompetency(posting,candidate) +
matchSkill(posting,candidate)+matchCohort(posting,candidate))
n += 1
l = len(names)
names.extend([0] * (n-l))
totalMatrix.extend([0] * (n**2 - len(totalMatrix)))
totalMatrix = np.asarray(totalMatrix)
totalMatrix = np.reshape(totalMatrix,(n,-1))
#at this point the matrix is structured as candidates down and jobs across
totalMatrix = np.transpose(totalMatrix)
#now it's switched!
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix)
totalMatrix = np.array(totalMatrix)
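# Scores were converted to costs above (max score minus score), so the Munkres
# (Hungarian) assignment below, which minimises total cost, effectively picks
# the candidate-to-posting matching with the highest overall suitability.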
minSuitability = 18
check = []
result = []
m = Munkres()
indexes = m.compute(totalMatrix)
#print_matrix(totalMatrix, msg='Lowest cost through this matrix:')
total = 0.0
unhappy_candidates = 0
medium_candidates = 0
tenpc_candidates = 0
qs_candidates = 0
vs_candidates = 0
f = open('output.txt', 'w')
for row, column in indexes:
if column < l:
value = totalMatrix[row][column]
if value > minSuitability*0.9:
tenpc_candidates += 1
elif value > minSuitability*0.75:
medium_candidates += 1
elif value > minSuitability/2:
unhappy_candidates += 1
elif value > minSuitability*0.25:
qs_candidates += 1
elif value > minSuitability*0.1:
vs_candidates += 1
total += value
check.append(column+1)
result.append((row,column))
f.write('For candidate %s: \nOptimal position: %d (score %s)\n'
% (names[column], column+1, value))
else:
pass
globalSatisfaction = 100*(1-(total/(l*minSuitability)))
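# Global satisfaction reaches 100% when the total assignment cost is zero and
# falls as the summed costs approach l * minSuitability (18 per candidate).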
print('Global satisfaction: %.2f%%' % globalSatisfaction)
print('Candidates who are more than 90%% suitable: %d' % vs_candidates)
print('Candidates who are more than 75%% suitable: %d' % qs_candidates)
print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates))
print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates)
print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates)
#output from excel:
correct = [1,3,5,9,10,2,4,8,6,7]
#this function tests output above against Excel:
#test(correct,check)
topMatrix = topFive(names,totalMatrix)
#print(topMatrix)
np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
end = timer()
print(end-start)
"""
#posting = [Posting(*postingsAll)]
#print(posting[0].anchor)
#print(posting)
#print(candidatesAll)
#print(postingsAll)
#print(postingsAll[0].name)
#print(preferences)
#print(postings)
#split up files into relative blocks
postCode = [lists[0] for lists in postings]
postDept = [lists[1] for lists in postings]
postAnchor = [lists[2] for lists in postings]
postSkills = [lists[3:5] for lists in postings]
postLocation = [lists[5] for lists in postings]
postCompetencies = [lists[7:10] for lists in postings]
postSecurity = [lists[10] for lists in postings]
#with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f:
#gives first column ie candidate a
a=totalMatrix[:,[0]]
#b = totalMatrix[:,[0]]
#print(a)
#converts 1D matrix to list for ease
a = np.array(a).tolist()
#print(a)
#creates list called output containing rank of score
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
#creates tuples of rank, job and appends to list
jobRank = []
# for rank, b in zip(output, postCode):
# jobScore = (rank,b)
# list(jobScore)
# jobRank.append(jobScore)
# print(jobRank)
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
# #print(a)
# jobRank = sorted(jobRank, reverse=False)
# print(jobRank)
# print('For candidate a, the best position is %s') % (jobRank[0][1])
# print(candidate[0].skills)
"""
| from __future__ import division
from timeit import default_timer as timer
import csv
import numpy as np
import itertools
from munkres import Munkres, print_matrix, make_cost_matrix
import sys
from classes import *
from functions import *
from math import sqrt
import Tkinter as tk
import tkFileDialog as filedialog
root = tk.Tk()
root.withdraw()
p_file = filedialog.askopenfilename(title='Please select the posting file')
c_file = filedialog.askopenfilename(title='Please select the candidate file')
"""for use with /users/java_jonathan/postings_lge.csv and
/Users/java_jonathan/candidates_lge.csv"""
# p_file = raw_input("Please enter the path for the postings file: ")
# p_file = p_file.strip()
# c_file = raw_input("Please enter the path for the candidate file: ")
# c_file = c_file.strip()
start = timer()
with open(p_file,'r') as f:
#with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f:
reader = csv.reader(f)
postingsAll = list(reader)
with open(c_file,'r') as f:
reader = csv.reader(f)
candidatesAll = list(reader)
"""create empty lists to fill with lists of lists output by iterating function
below"""
names = []
totalMatrix = []
for list in candidatesAll:
candidate = Candidate(*list)
names.append(candidate.name)
n = 0
for list in postingsAll:
posting = Posting(*list)
totalMatrix.append(matchDept(posting,candidate) + matchAnchor(posting,candidate)
+matchLocation(posting,candidate) + matchCompetency(posting,candidate) +
matchSkill(posting,candidate)+matchCohort(posting,candidate))
n += 1
l = len(names)
names.extend([0] * (n-l))
totalMatrix.extend([0] * (n**2 - len(totalMatrix)))
totalMatrix = np.asarray(totalMatrix)
totalMatrix = np.reshape(totalMatrix,(n,-1))
#at this point the matrix is structured as candidates down and jobs across
totalMatrix = np.transpose(totalMatrix)
#now it's switched!
totalMatrix = np.subtract(np.amax(totalMatrix),totalMatrix)
totalMatrix = np.array(totalMatrix)
minSuitability = 18
check = []
result = []
m = Munkres()
indexes = m.compute(totalMatrix)
#print_matrix(totalMatrix, msg='Lowest cost through this matrix:')
total = 0.0
unhappy_candidates = 0
medium_candidates = 0
tenpc_candidates = 0
qs_candidates = 0
vs_candidates = 0
f = open('output.txt', 'w')
for row, column in indexes:
if column < l:
value = totalMatrix[row][column]
if value > minSuitability*0.9:
tenpc_candidates += 1
elif value > minSuitability*0.75:
medium_candidates += 1
elif value > minSuitability/2:
unhappy_candidates += 1
elif value > minSuitability*0.25:
qs_candidates += 1
elif value > minSuitability*0.1:
vs_candidates += 1
total += value
check.append(column+1)
result.append((row,column))
f.write('For candidate %s: \nOptimal position: %d (score %s)\n'
% (names[column], column+1, value))
else:
pass
globalSatisfaction = 100*(1-(total/(l*minSuitability)))
print('Global satisfaction: %.2f%%' % globalSatisfaction)
print('Candidates who are more than 90%% suitable: %d' % vs_candidates)
print('Candidates who are more than 75%% suitable: %d' % qs_candidates)
print('Candidates who are more than 50%% suitable: %d' % (l-unhappy_candidates))
print('Candidates who are more than 75%% unsuitable: %d' % medium_candidates)
print('Candidates who are more than 90%% unsuitable: %d' % tenpc_candidates)
#output from excel:
correct = [1,3,5,9,10,2,4,8,6,7]
#this function tests output above against Excel:
#test(correct,check)
topMatrix = topFive(names,totalMatrix)
#print(topMatrix)
np.savetxt('/Users/java_jonathan/test.csv',topMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
np.savetxt('/Users/java_jonathan/test2.csv',totalMatrix, fmt='%s', delimiter=',',
newline='\n', header='', footer='', comments='# ')
end = timer()
print(end-start)
"""
#posting = [Posting(*postingsAll)]
#print(posting[0].anchor)
#print(posting)
#print(candidatesAll)
#print(postingsAll)
#print(postingsAll[0].name)
#print(preferences)
#print(postings)
#split up files into relative blocks
postCode = [lists[0] for lists in postings]
postDept = [lists[1] for lists in postings]
postAnchor = [lists[2] for lists in postings]
postSkills = [lists[3:5] for lists in postings]
postLocation = [lists[5] for lists in postings]
postCompetencies = [lists[7:10] for lists in postings]
postSecurity = [lists[10] for lists in postings]
#with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f:
#gives first column ie candidate a
a=totalMatrix[:,[0]]
#b = totalMatrix[:,[0]]
#print(a)
#converts 1D matrix to list for ease
a = np.array(a).tolist()
#print(a)
#creates list called output containing rank of score
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
#creates tuples of rank, job and appends to list
jobRank = []
# for rank, b in zip(output, postCode):
# jobScore = (rank,b)
# list(jobScore)
# jobRank.append(jobScore)
# print(jobRank)
output = [0] * len(a)
for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])):
output[x] = i
print(output)
# #print(a)
# jobRank = sorted(jobRank, reverse=False)
# print(jobRank)
# print('For candidate a, the best position is %s') % (jobRank[0][1])
# print(candidate[0].skills)
"""
| en | 0.644844 | for use with /users/java_jonathan/postings_lge.csv and /Users/java_jonathan/candidates_lge.csv # p_file = raw_input("Please enter the path for the postings file: ") # p_file = p_file.strip() # c_file = raw_input("Please enter the path for the candidate file: ") # c_file = c_file.strip() #with open('/Users/Jonathan/Google Drive/CPD/Python/postings.csv','r') as f: create empty lists to fill with lists of lists output by iterating function below #at this point the matrix is structured as candidates down and jobs across #now it's switched! #print_matrix(totalMatrix, msg='Lowest cost through this matrix:') #output from excel: #this function tests output above against Excel: #test(correct,check) #print(topMatrix) #posting = [Posting(*postingsAll)] #print(posting[0].anchor) #print(posting) #print(candidatesAll) #print(postingsAll) #print(postingsAll[0].name) #print(preferences) #print(postings) #split up files into relative blocks postCode = [lists[0] for lists in postings] postDept = [lists[1] for lists in postings] postAnchor = [lists[2] for lists in postings] postSkills = [lists[3:5] for lists in postings] postLocation = [lists[5] for lists in postings] postCompetencies = [lists[7:10] for lists in postings] postSecurity = [lists[10] for lists in postings] #with open('/Users/Jonathan/Google Drive/CPD/Python/candidates.csv','r') as f: #gives first column ie candidate a a=totalMatrix[:,[0]] #b = totalMatrix[:,[0]] #print(a) #converts 1D matrix to list for ease a = np.array(a).tolist() #print(a) #creates list called output containing rank of score output = [0] * len(a) for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])): output[x] = i print(output) #creates tuples of rank, job and appends to list jobRank = [] # for rank, b in zip(output, postCode): # jobScore = (rank,b) # list(jobScore) # jobRank.append(jobScore) # print(jobRank) output = [0] * len(a) for i, x in enumerate(sorted(range(len(a)), key=lambda y: a[y])): output[x] = i print(output) # #print(a) # jobRank = sorted(jobRank, reverse=False) # print(jobRank) # print('For candidate a, the best position is %s') % (jobRank[0][1]) # print(candidate[0].skills) | 3.015289 | 3 |
pip_info/setup.py | 95616ARG/SyReNN | 36 | 459 | <reponame>95616ARG/SyReNN<filename>pip_info/setup.py
"""Setup script for PySyReNN.
Adapted from:
https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
"""
import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "pysyrenn"
PACKAGES = [
"syrenn_proto",
"pysyrenn",
"pysyrenn.frontend",
"pysyrenn.helpers",
]
META_PATH = "__metadata__.py"
KEYWORDS = ["class", "attribute", "boilerplate"]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = ["torch"]
with open("requirements.txt") as requirements:
reading = False
for line in requirements.readlines():
if line.startswith("# PYSYRENN"):
reading = True
elif line.startswith("# END"):
reading = False
elif line.startswith("#"):
pass
elif reading:
INSTALL_REQUIRES.append(line.strip().split("==")[0])
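# Only the lines between the "# PYSYRENN" and "# END" markers of
# requirements.txt are picked up here, with any "==version" pins stripped.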
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=PACKAGES,
package_dir={"": "."},
package_data={"": ["pysyrenn/**/*.py"]},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
| """Setup script for PySyReNN.
Adapted from:
https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
"""
import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "pysyrenn"
PACKAGES = [
"syrenn_proto",
"pysyrenn",
"pysyrenn.frontend",
"pysyrenn.helpers",
]
META_PATH = "__metadata__.py"
KEYWORDS = ["class", "attribute", "boilerplate"]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = ["torch"]
with open("requirements.txt") as requirements:
reading = False
for line in requirements.readlines():
if line.startswith("# PYSYRENN"):
reading = True
elif line.startswith("# END"):
reading = False
elif line.startswith("#"):
pass
elif reading:
INSTALL_REQUIRES.append(line.strip().split("==")[0])
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.md"),
long_description_content_type="text/markdown",
packages=PACKAGES,
package_dir={"": "."},
package_data={"": ["pysyrenn/**/*.py"]},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
) | de | 0.356174 | Setup script for PySyReNN. Adapted from: https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/ ################################################################### ################################################################### Build an absolute path from *parts* and and return the contents of the resulting file. Assume UTF-8 encoding. Extract __*meta*__ from META_FILE. | 1.936945 | 2 |
neptune/generated/swagger_client/path_constants.py | jiji-online/neptune-cli | 0 | 460 | <filename>neptune/generated/swagger_client/path_constants.py
REST_PATH = u""
WS_PATH = u"/api/notifications/v1"
| <filename>neptune/generated/swagger_client/path_constants.py
REST_PATH = u""
WS_PATH = u"/api/notifications/v1"
| none | 1 | 1.047223 | 1 |
|
load/__init__.py | andrewp-as-is/load.py | 0 | 461 | __all__ = ["load"]
import imp
import importlib
def load(name, path):
"""Load and initialize a module implemented as a Python source file and return its module object"""
if hasattr(importlib, "machinery"):
loader = importlib.machinery.SourceFileLoader(name, path)
return loader.load_module()
return imp.load_source(name, path)
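# Hypothetical usage sketch (module name and path are assumptions):
# helpers = load("helpers", "/path/to/helpers.py")
# print(helpers.__name__)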
| __all__ = ["load"]
import imp
import importlib
def load(name, path):
"""Load and initialize a module implemented as a Python source file and return its module object"""
if hasattr(importlib, "machinery"):
loader = importlib.machinery.SourceFileLoader(name, path)
return loader.load_module()
return imp.load_source(name, path)
| en | 0.778786 | Load and initialize a module implemented as a Python source file and return its module object | 2.700181 | 3 |
pygears/svgen/modules/sieve.py | Risto97/pygears | 0 | 462 | <gh_stars>0
import itertools
from pygears.common.sieve import sieve
from pygears.svgen.inst import SVGenInstPlugin
from pygears.svgen.svmod import SVModuleGen
from functools import partial
from pygears.svgen.svgen import SVGenPlugin
from pygears.svgen.util import svgen_visitor
from pygears.core.hier_node import HierVisitorBase
from pygears.svgen.inst import svgen_inst
from pygears.rtl.gear import RTLGearHierVisitor, is_gear_instance
def index_to_sv_slice(dtype, key):
subtype = dtype[key]
if isinstance(key, slice):
key = key.start
if key is None or key == 0:
low_pos = 0
else:
low_pos = int(dtype[:key])
high_pos = low_pos + int(subtype) - 1
return f'{high_pos}:{low_pos}'
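# index_to_sv_slice() maps an index or slice within a pygears dtype onto the
# equivalent SystemVerilog "high:low" bit-range string used in the generated
# sieve module.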
class SVGenSieve(SVModuleGen):
@property
def is_generated(self):
return True
def get_module(self, template_env):
def get_stages():
for s in itertools.chain(self.node.pre_sieves, [self.node]):
indexes = s.params['key']
if not isinstance(indexes, tuple):
indexes = (indexes, )
dtype = s.in_ports[0].dtype
out_type = s.out_ports[0].dtype
slices = list(
map(
partial(index_to_sv_slice, dtype),
filter(lambda i: int(dtype[i]) > 0, indexes)))
yield slices, out_type
stages = list(get_stages())
# If any of the sieves has shrunk data to 0 width, there is nothing to
# do
if any(i[0] == [] for i in stages):
stages = []
context = {
'stages': stages,
'module_name': self.sv_module_name,
'intfs': list(self.sv_port_configs())
}
return template_env.render_local(__file__, "sieve.j2", context)
@svgen_visitor
class RemoveEqualReprSieveVisitor(RTLGearHierVisitor):
def sieve(self, node):
pout = node.out_ports[0]
pin = node.in_ports[0]
if pin.dtype == pout.dtype:
node.bypass()
@svgen_visitor
class CollapseSievesVisitor(RTLGearHierVisitor):
def sieve(self, node):
if not hasattr(node, 'pre_sieves'):
node.pre_sieves = []
sieve_cons = [
p for p in node.consumers if is_gear_instance(p.node, sieve)
]
pin = node.in_ports[0]
pout = node.out_ports[0]
iin = pin.producer
iout = pout.consumer
if sieve_cons:
# There is a Sieve connected to this Sieve, hence we can combine
# two of them into a single SV module
# Connect the consumers of this Sieve, which are Sieves themselves,
# to this Sieve's predecessor
for cons_pin in iout.consumers.copy():
consumer = cons_pin.node
if is_gear_instance(consumer, sieve):
# print(f'Merging {node.name} to {consumer.name}')
# print(consumer.params['key'])
# If the consumer is a Sieve, just register this Sieve with
# it, and short circuit this one
consumer.pre_sieves = node.pre_sieves + [node]
iout.disconnect(cons_pin)
iin.connect(cons_pin)
        # print(f'Remaining consumer: {[p.node.name for p in node.consumers]}')
if not node.consumers:
            # Finally, if there are no consumers left for this sieve, remove
            # this Sieve completely (with all its connections) from the
# SVGen tree
node.remove()
iout.remove()
class SVGenSievePlugin(SVGenInstPlugin, SVGenPlugin):
@classmethod
def bind(cls):
cls.registry['svgen']['module_namespace'][sieve] = SVGenSieve
cls.registry['svgen']['flow'].insert(
cls.registry['svgen']['flow'].index(svgen_inst),
CollapseSievesVisitor)
# cls.registry['SVGenFlow'].insert(
# cls.registry['SVGenFlow'].key(CollapseSievesVisitor),
# RemoveEqualReprSieveVisitor)
| import itertools
from pygears.common.sieve import sieve
from pygears.svgen.inst import SVGenInstPlugin
from pygears.svgen.svmod import SVModuleGen
from functools import partial
from pygears.svgen.svgen import SVGenPlugin
from pygears.svgen.util import svgen_visitor
from pygears.core.hier_node import HierVisitorBase
from pygears.svgen.inst import svgen_inst
from pygears.rtl.gear import RTLGearHierVisitor, is_gear_instance
def index_to_sv_slice(dtype, key):
subtype = dtype[key]
if isinstance(key, slice):
key = key.start
if key is None or key == 0:
low_pos = 0
else:
low_pos = int(dtype[:key])
high_pos = low_pos + int(subtype) - 1
return f'{high_pos}:{low_pos}'
class SVGenSieve(SVModuleGen):
@property
def is_generated(self):
return True
def get_module(self, template_env):
def get_stages():
for s in itertools.chain(self.node.pre_sieves, [self.node]):
indexes = s.params['key']
if not isinstance(indexes, tuple):
indexes = (indexes, )
dtype = s.in_ports[0].dtype
out_type = s.out_ports[0].dtype
slices = list(
map(
partial(index_to_sv_slice, dtype),
filter(lambda i: int(dtype[i]) > 0, indexes)))
yield slices, out_type
stages = list(get_stages())
# If any of the sieves has shrunk data to 0 width, there is nothing to
# do
if any(i[0] == [] for i in stages):
stages = []
context = {
'stages': stages,
'module_name': self.sv_module_name,
'intfs': list(self.sv_port_configs())
}
return template_env.render_local(__file__, "sieve.j2", context)
@svgen_visitor
class RemoveEqualReprSieveVisitor(RTLGearHierVisitor):
def sieve(self, node):
pout = node.out_ports[0]
pin = node.in_ports[0]
if pin.dtype == pout.dtype:
node.bypass()
@svgen_visitor
class CollapseSievesVisitor(RTLGearHierVisitor):
def sieve(self, node):
if not hasattr(node, 'pre_sieves'):
node.pre_sieves = []
sieve_cons = [
p for p in node.consumers if is_gear_instance(p.node, sieve)
]
pin = node.in_ports[0]
pout = node.out_ports[0]
iin = pin.producer
iout = pout.consumer
if sieve_cons:
# There is a Sieve connected to this Sieve, hence we can combine
# two of them into a single SV module
# Connect the consumers of this Sieve, which are Sieves themselves,
# to this Sieve's predecessor
for cons_pin in iout.consumers.copy():
consumer = cons_pin.node
if is_gear_instance(consumer, sieve):
# print(f'Merging {node.name} to {consumer.name}')
# print(consumer.params['key'])
# If the consumer is a Sieve, just register this Sieve with
# it, and short circuit this one
consumer.pre_sieves = node.pre_sieves + [node]
iout.disconnect(cons_pin)
iin.connect(cons_pin)
        # print(f'Remaining consumer: {[p.node.name for p in node.consumers]}')
if not node.consumers:
            # Finally, if there are no consumers left for this sieve, remove
            # this Sieve completely (with all its connections) from the
# SVGen tree
node.remove()
iout.remove()
class SVGenSievePlugin(SVGenInstPlugin, SVGenPlugin):
@classmethod
def bind(cls):
cls.registry['svgen']['module_namespace'][sieve] = SVGenSieve
cls.registry['svgen']['flow'].insert(
cls.registry['svgen']['flow'].index(svgen_inst),
CollapseSievesVisitor)
# cls.registry['SVGenFlow'].insert(
# cls.registry['SVGenFlow'].key(CollapseSievesVisitor),
# RemoveEqualReprSieveVisitor) | en | 0.797006 | # If any of the sieves has shrunk data to 0 width, there is nothing to # do # There is a Sieve connected to this Sieve, hence we can combine # two of them into a single SV module # Connect the consumers of this Sieve, which are Sieves themselves, # to this Sieve's predecessor # print(f'Merging {node.name} to {consumer.name}') # print(consumer.params['key']) # If the consumer is a Sieve, just register this Sieve with # it, and short circuit this one # print(f'Remaining conusmer: {[p.node.name for p in node.consumers]}') # Finally, if ther are no consumers left for this sieve remove # this Sieve completely (with all it's connections) from the # SVGen tree # cls.registry['SVGenFlow'].insert( # cls.registry['SVGenFlow'].key(CollapseSievesVisitor), # RemoveEqualReprSieveVisitor) | 2.141918 | 2 |
examples/my_model_test.py | gzpyy/qlib | 0 | 463 | <gh_stars>0
#encoding=utf-8
import qlib
import pandas as pd
import pickle
import xgboost as xgb
import numpy as np
import re
from qlib.constant import REG_US
from qlib.utils import exists_qlib_data, init_instance_by_config
from qlib.workflow import R
from qlib.workflow.record_temp import SignalRecord, PortAnaRecord
from qlib.utils import flatten_dict
from qlib.data import LocalExpressionProvider
from qlib.data.ops import Operators, OpsList
from qlib.data.base import Feature
from pyecharts import options as opts
from pyecharts.charts import Kline, Line, Grid
from my_data_handler import MyAlphaHandler
# model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model'
model_file = r'.\mlruns\1\148ef1cd7acd48deac3eadc339ad3008\artifacts\trained_model'
with open(model_file, 'rb') as fi:
model = pickle.load(fi)
exprs, columns = MyAlphaHandler.get_custom_config()
raw_data = pd.read_csv('../stock_data/TSLA.csv', parse_dates=['time'])
raw_data['data_time'] = raw_data['time'].dt.strftime("%Y-%m-%d %H:%M:00")
raw_data.set_index('time', inplace=True)
raw_data["vwap"] = np.nan
raw_data.sort_index(inplace=True)
# print(raw_data)
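# MyFeature below serves field values straight from the local TSLA.csv frame
# loaded above instead of a qlib data provider; my_parse_field() rewrites
# "$field" references into MyFeature(...) calls and operator names into
# Operators.* calls before the expressions are evaluated.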
class MyFeature(Feature):
def _load_internal(self, instrument, start_index, end_index, freq):
print("load", self._name, instrument, start_index, end_index, freq)
return raw_data.loc[start_index:end_index][self._name]
Operators.register(OpsList + [MyFeature])
def my_parse_field(field):
if not isinstance(field, str):
field = str(field)
for pattern, new in [(r"\$(\w+)", rf'MyFeature("\1")'), (r"(\w+\s*)\(", r"Operators.\1(")]: # Features # Operators
field = re.sub(pattern, new, field)
return field
obj = dict()
for field in exprs:
expression = eval(my_parse_field(field))
series = expression.load('TSLA', "2022-01-02", "2022-02-28", "1min")
series = series.astype(np.float32)
obj[field] = series
data = pd.DataFrame(obj)
data.columns = columns
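# "data" now holds one engineered feature per column (renamed to the handler's
# column names), indexed by bar time and ready to feed to the XGBoost model
# for the view window selected below.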
view_time_start = '2022-02-11'
view_time_end = '2022-02-12'
pre_data = raw_data.loc[view_time_start:view_time_end].copy()
pred=model.model.predict(xgb.DMatrix(data.loc[view_time_start:view_time_end]))
pre_data['pred_score'] = pred
records = pre_data.to_dict("records")
cash = 50000
position = {}
hold_thresh = 5
score_thresh = 0.001
x_axises, y_axises, mark_points, money = [], [], [], []
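# Simple long-only simulation: buy when pred_score > score_thresh and flat;
# sell when the open is 1% above or 0.5% below the entry bar's close, when the
# score drops below -score_thresh, or after hold_thresh bars.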
for record in records:
x_axises.append(record['data_time'])
y_axises.append([
record['open'], record['close'], record['low'], record['high']
])
if 'hold_cnt' in position:
position['hold_cnt'] += 1
if position and (record['open'] >= position['close'] * 1.01 or record['open'] < position['close'] * 0.995 or record['pred_score'] < -score_thresh or position['hold_cnt'] >= hold_thresh):
cash += position['amount'] * record['open']
position = {}
#print("sell")
mark_points.append(opts.MarkPointItem(
coord=[record['data_time'], record['high']],
symbol='triangle', symbol_size=7,
itemstyle_opts=opts.ItemStyleOpts(color="green")
))
elif record['pred_score'] > score_thresh and not position:
position = dict(record)
position['amount'] = int(cash / position['open'])
cash -= position['amount'] * position['open']
# buy
#print("buy")
position['hold_cnt'] = 0
mark_points.append(opts.MarkPointItem(
coord=[record['data_time'], record['high']],
symbol='arrow', symbol_size=7,
itemstyle_opts=opts.ItemStyleOpts(color="yellow")
))
cur_money = cash
if position:
cur_money += position['amount'] * record['close']
money.append(cur_money)
if position:
cash += position['amount'] * records[-1]['close']
print("cash:", cash)
kline_graph = (
Kline()
.add_xaxis(x_axises)
.add_yaxis(
"kline",
y_axises,
markpoint_opts=opts.MarkPointOpts(
data=mark_points
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(is_scale=True),
yaxis_opts=opts.AxisOpts(
is_scale=True,
splitarea_opts=opts.SplitAreaOpts(
is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
),
),
title_opts=opts.TitleOpts(title="%s_%s" % (view_time_start, view_time_end)),
datazoom_opts=[opts.DataZoomOpts(type_="inside", xaxis_index=[0, 1],)],
)
)
kline_line = (
Line()
.add_xaxis(xaxis_data=x_axises)
.add_yaxis(
series_name="cur_money",
y_axis=money,
is_smooth=True,
linestyle_opts=opts.LineStyleOpts(opacity=0.5),
label_opts=opts.LabelOpts(is_show=False),
markline_opts=opts.MarkLineOpts(
data=[opts.MarkLineItem(y=50000)]
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=2,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
min_='dataMin'
)
)
)
grid_chart = Grid(init_opts=opts.InitOpts(width='2000px', height='900px'))
grid_chart.add(
kline_graph,
grid_opts=opts.GridOpts(pos_left="3%", pos_right="10%", height="50%"),
)
grid_chart.add(
kline_line,
grid_opts=opts.GridOpts(
pos_left="3%", pos_right="10%", pos_top="60%", height="30%"
),
)
grid_chart.render("kline_markline.html") | #encoding=utf-8
import qlib
import pandas as pd
import pickle
import xgboost as xgb
import numpy as np
import re
from qlib.constant import REG_US
from qlib.utils import exists_qlib_data, init_instance_by_config
from qlib.workflow import R
from qlib.workflow.record_temp import SignalRecord, PortAnaRecord
from qlib.utils import flatten_dict
from qlib.data import LocalExpressionProvider
from qlib.data.ops import Operators, OpsList
from qlib.data.base import Feature
from pyecharts import options as opts
from pyecharts.charts import Kline, Line, Grid
from my_data_handler import MyAlphaHandler
# model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model'
model_file = r'.\mlruns\1\148ef1cd7acd48deac3eadc339ad3008\artifacts\trained_model'
with open(model_file, 'rb') as fi:
model = pickle.load(fi)
exprs, columns = MyAlphaHandler.get_custom_config()
raw_data = pd.read_csv('../stock_data/TSLA.csv', parse_dates=['time'])
raw_data['data_time'] = raw_data['time'].dt.strftime("%Y-%m-%d %H:%M:00")
raw_data.set_index('time', inplace=True)
raw_data["vwap"] = np.nan
raw_data.sort_index(inplace=True)
# print(raw_data)
class MyFeature(Feature):
def _load_internal(self, instrument, start_index, end_index, freq):
print("load", self._name, instrument, start_index, end_index, freq)
return raw_data.loc[start_index:end_index][self._name]
Operators.register(OpsList + [MyFeature])
def my_parse_field(field):
if not isinstance(field, str):
field = str(field)
for pattern, new in [(r"\$(\w+)", rf'MyFeature("\1")'), (r"(\w+\s*)\(", r"Operators.\1(")]: # Features # Operators
field = re.sub(pattern, new, field)
return field
obj = dict()
for field in exprs:
expression = eval(my_parse_field(field))
series = expression.load('TSLA', "2022-01-02", "2022-02-28", "1min")
series = series.astype(np.float32)
obj[field] = series
data = pd.DataFrame(obj)
data.columns = columns
view_time_start = '2022-02-11'
view_time_end = '2022-02-12'
pre_data = raw_data.loc[view_time_start:view_time_end].copy()
pred = model.model.predict(xgb.DMatrix(data.loc[view_time_start:view_time_end]))
pre_data['pred_score'] = pred
records = pre_data.to_dict("records")
cash = 50000
position = {}
hold_thresh = 5
score_thresh = 0.001
x_axises, y_axises, mark_points, money = [], [], [], []
for record in records:
x_axises.append(record['data_time'])
y_axises.append([
record['open'], record['close'], record['low'], record['high']
])
if 'hold_cnt' in position:
position['hold_cnt'] += 1
if position and (record['open'] >= position['close'] * 1.01 or record['open'] < position['close'] * 0.995 or record['pred_score'] < -score_thresh or position['hold_cnt'] >= hold_thresh):
cash += position['amount'] * record['open']
position = {}
#print("sell")
mark_points.append(opts.MarkPointItem(
coord=[record['data_time'], record['high']],
symbol='triangle', symbol_size=7,
itemstyle_opts=opts.ItemStyleOpts(color="green")
))
elif record['pred_score'] > score_thresh and not position:
position = dict(record)
position['amount'] = int(cash / position['open'])
cash -= position['amount'] * position['open']
# buy
#print("buy")
position['hold_cnt'] = 0
mark_points.append(opts.MarkPointItem(
coord=[record['data_time'], record['high']],
symbol='arrow', symbol_size=7,
itemstyle_opts=opts.ItemStyleOpts(color="yellow")
))
cur_money = cash
if position:
cur_money += position['amount'] * record['close']
money.append(cur_money)
if position:
cash += position['amount'] * records[-1]['close']
print("cash:", cash)
kline_graph = (
Kline()
.add_xaxis(x_axises)
.add_yaxis(
"kline",
y_axises,
markpoint_opts=opts.MarkPointOpts(
data=mark_points
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(is_scale=True),
yaxis_opts=opts.AxisOpts(
is_scale=True,
splitarea_opts=opts.SplitAreaOpts(
is_show=True, areastyle_opts=opts.AreaStyleOpts(opacity=1)
),
),
title_opts=opts.TitleOpts(title="%s_%s" % (view_time_start, view_time_end)),
datazoom_opts=[opts.DataZoomOpts(type_="inside", xaxis_index=[0, 1],)],
)
)
kline_line = (
Line()
.add_xaxis(xaxis_data=x_axises)
.add_yaxis(
series_name="cur_money",
y_axis=money,
is_smooth=True,
linestyle_opts=opts.LineStyleOpts(opacity=0.5),
label_opts=opts.LabelOpts(is_show=False),
markline_opts=opts.MarkLineOpts(
data=[opts.MarkLineItem(y=50000)]
),
)
.set_global_opts(
xaxis_opts=opts.AxisOpts(
type_="category",
grid_index=2,
axislabel_opts=opts.LabelOpts(is_show=False),
),
yaxis_opts=opts.AxisOpts(
min_='dataMin'
)
)
)
grid_chart = Grid(init_opts=opts.InitOpts(width='2000px', height='900px'))
grid_chart.add(
kline_graph,
grid_opts=opts.GridOpts(pos_left="3%", pos_right="10%", height="50%"),
)
grid_chart.add(
kline_line,
grid_opts=opts.GridOpts(
pos_left="3%", pos_right="10%", pos_top="60%", height="30%"
),
)
grid_chart.render("kline_markline.html") | en | 0.561444 | #encoding=utf-8 # model_file = r'.\mlruns\1\d6536b056ba84a74be6b33971f443cf6\artifacts\trained_model' # print(raw_data) # Features # Operators #print("sell") # buy #print("buy") | 1.877635 | 2 |
realfastapi/routes/endpoints/default.py | wborbajr/RealFastAPI | 0 | 464 | from fastapi import APIRouter
router = APIRouter()
@router.get("/")
def working():
return {"Working"}
| from fastapi import APIRouter
router = APIRouter()
@router.get("/")
def working():
return {"Working"}
| none | 1 | 2.11721 | 2 |
|
graphzoom/embed_methods/dgi/execute.py | junhoher/GraphZoom | 16 | 465 | <filename>graphzoom/embed_methods/dgi/execute.py
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import networkx as nx
import time
from embed_methods.dgi.models import DGI, LogReg
from embed_methods.dgi.utils import process
def dgi(G, features):
batch_size = 1
nb_epochs = 10000
patience = 20
lr = 0.001
l2_coef = 0.0
drop_prob = 0.0
hid_units = 512
sparse = True
nonlinearity = 'prelu' # special name to separate parameters
adj = nx.to_scipy_sparse_matrix(G, weight='wgt')
features = sp.lil_matrix(np.matrix(features))
features, _ = process.preprocess_features(features)
nb_nodes = features.shape[0]
ft_size = features.shape[1]
adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
if sparse:
sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
else:
adj = (adj + sp.eye(adj.shape[0])).todense()
features = torch.FloatTensor(features[np.newaxis])
if not sparse:
adj = torch.FloatTensor(adj[np.newaxis])
model = DGI(ft_size, hid_units, nonlinearity)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)
if torch.cuda.is_available():
print('Using CUDA')
model.cuda()
features = features.cuda()
if sparse:
sp_adj = sp_adj.cuda()
else:
adj = adj.cuda()
b_xent = nn.BCEWithLogitsLoss()
xent = nn.CrossEntropyLoss()
cnt_wait = 0
best = 1e9
best_t = 0
for epoch in range(nb_epochs):
model.train()
optimiser.zero_grad()
idx = np.random.permutation(nb_nodes)
shuf_fts = features[:, idx, :]
lbl_1 = torch.ones(batch_size, nb_nodes)
lbl_2 = torch.zeros(batch_size, nb_nodes)
lbl = torch.cat((lbl_1, lbl_2), 1)
if torch.cuda.is_available():
shuf_fts = shuf_fts.cuda()
lbl = lbl.cuda()
logits = model(features, shuf_fts, sp_adj if sparse else adj, sparse, None, None, None)
loss = b_xent(logits, lbl)
print('Loss:', loss)
if loss < best:
best = loss
best_t = epoch
cnt_wait = 0
else:
cnt_wait += 1
if cnt_wait == patience:
print("epochs: ", epoch)
print('Early stopping!')
break
loss.backward()
optimiser.step()
return (((model.embed(features, sp_adj if sparse else adj, sparse, None)[0]).squeeze()).data).cpu().numpy()
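# Usage sketch (added for illustration; the graph and feature names are assumptions):
# import networkx as nx
# G = nx.karate_club_graph()
# feats = np.eye(G.number_of_nodes())  # placeholder one-hot node features
# embeddings = dgi(G, feats)           # -> numpy array of shape (n_nodes, hid_units)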
| <filename>graphzoom/embed_methods/dgi/execute.py
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import networkx as nx
import time
from embed_methods.dgi.models import DGI, LogReg
from embed_methods.dgi.utils import process
def dgi(G, features):
batch_size = 1
nb_epochs = 10000
patience = 20
lr = 0.001
l2_coef = 0.0
drop_prob = 0.0
hid_units = 512
sparse = True
nonlinearity = 'prelu' # special name to separate parameters
adj = nx.to_scipy_sparse_matrix(G, weight='wgt')
features = sp.lil_matrix(np.matrix(features))
features, _ = process.preprocess_features(features)
nb_nodes = features.shape[0]
ft_size = features.shape[1]
adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))
if sparse:
sp_adj = process.sparse_mx_to_torch_sparse_tensor(adj)
else:
adj = (adj + sp.eye(adj.shape[0])).todense()
features = torch.FloatTensor(features[np.newaxis])
if not sparse:
adj = torch.FloatTensor(adj[np.newaxis])
model = DGI(ft_size, hid_units, nonlinearity)
optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)
if torch.cuda.is_available():
print('Using CUDA')
model.cuda()
features = features.cuda()
if sparse:
sp_adj = sp_adj.cuda()
else:
adj = adj.cuda()
b_xent = nn.BCEWithLogitsLoss()
xent = nn.CrossEntropyLoss()
cnt_wait = 0
best = 1e9
best_t = 0
for epoch in range(nb_epochs):
model.train()
optimiser.zero_grad()
idx = np.random.permutation(nb_nodes)
shuf_fts = features[:, idx, :]
lbl_1 = torch.ones(batch_size, nb_nodes)
lbl_2 = torch.zeros(batch_size, nb_nodes)
lbl = torch.cat((lbl_1, lbl_2), 1)
if torch.cuda.is_available():
shuf_fts = shuf_fts.cuda()
lbl = lbl.cuda()
logits = model(features, shuf_fts, sp_adj if sparse else adj, sparse, None, None, None)
loss = b_xent(logits, lbl)
print('Loss:', loss)
if loss < best:
best = loss
best_t = epoch
cnt_wait = 0
else:
cnt_wait += 1
if cnt_wait == patience:
print("epochs: ", epoch)
print('Early stopping!')
break
loss.backward()
optimiser.step()
return (((model.embed(features, sp_adj if sparse else adj, sparse, None)[0]).squeeze()).data).cpu().numpy()
| en | 0.072173 | # special name to separate parameters | 2.155236 | 2 |
tools/ci/deploy_to_github_release.py | rodb70/RDMnet | 30 | 466 | <gh_stars>10-100
"""Deploys binaries to a GitHub release given the specified tag name."""
import argparse
import os
import time
from github import Github
THIS_FILE_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
GH_REPO_IDENT = "ETCLabs/RDMnet"
GH_USERNAME = "svc-etclabs"
GH_API_TOKEN = os.getenv("SVC_ETCLABS_REPO_TOKEN")
def deploy_binaries(version: str):
"""Deploys staged binaries to a new GitHub Release."""
g = Github(login_or_token=GH_USERNAME, password=GH_API_TOKEN)
repo = g.get_repo(GH_REPO_IDENT)
print(f"Waiting for the correct GitHub tag v{version} to become available...")
keep_trying = True
while keep_trying:
for tag in repo.get_tags():
if tag.name == f"v{version}":
keep_trying = False # Tag now exists
break
if keep_trying:
time.sleep(5)
print(f"Tag v{version} available. Creating release...")
new_release = repo.create_git_release(
tag=f"v{version}",
name=f"RDMnet v{version}",
message=f"Automated release of RDMnet for v{version}",
)
new_release.upload_asset("RDMnetSetup_x86.msi")
new_release.upload_asset("RDMnetSetup_x64.msi")
new_release.upload_asset("RDMnet.pkg")
def main():
parser = argparse.ArgumentParser(
description="Deploy RDMnet artifacts to GitHub Release"
)
parser.add_argument("version", help="Artifact version being deployed")
args = parser.parse_args()
# Make sure our cwd is the root of the repository
os.chdir(os.path.abspath(os.path.join(THIS_FILE_DIRECTORY, "..", "..")))
deploy_binaries(args.version)
if __name__ == "__main__":
main()
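# Example invocation (sketch, not part of the original script):
#   SVC_ETCLABS_REPO_TOKEN=<token> python deploy_to_github_release.py 0.3.0
# The script then waits for tag v0.3.0 on ETCLabs/RDMnet, creates the release,
# and uploads the three staged installer artifacts listed above.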
| """Deploys binaries to a GitHub release given the specified tag name."""
import argparse
import os
import time
from github import Github
THIS_FILE_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
GH_REPO_IDENT = "ETCLabs/RDMnet"
GH_USERNAME = "svc-etclabs"
GH_API_TOKEN = os.getenv("SVC_ETCLABS_REPO_TOKEN")
def deploy_binaries(version: str):
"""Deploys staged binaries to a new GitHub Release."""
g = Github(login_or_token=GH_USERNAME, password=GH_API_TOKEN)
repo = g.get_repo(GH_REPO_IDENT)
print(f"Waiting for the correct GitHub tag v{version} to become available...")
keep_trying = True
while keep_trying:
for tag in repo.get_tags():
if tag.name == f"v{version}":
keep_trying = False # Tag now exists
break
if keep_trying:
time.sleep(5)
print(f"Tag v{version} available. Creating release...")
new_release = repo.create_git_release(
tag=f"v{version}",
name=f"RDMnet v{version}",
message=f"Automated release of RDMnet for v{version}",
)
new_release.upload_asset("RDMnetSetup_x86.msi")
new_release.upload_asset("RDMnetSetup_x64.msi")
new_release.upload_asset("RDMnet.pkg")
def main():
parser = argparse.ArgumentParser(
description="Deploy RDMnet artifacts to GitHub Release"
)
parser.add_argument("version", help="Artifact version being deployed")
args = parser.parse_args()
# Make sure our cwd is the root of the repository
os.chdir(os.path.abspath(os.path.join(THIS_FILE_DIRECTORY, "..", "..")))
deploy_binaries(args.version)
if __name__ == "__main__":
main() | en | 0.752667 | Deploys binaries to a GitHub release given the specified tag name. Deploys staged binaries to a new GitHub Release. # Tag now exists # Make sure our cwd is the root of the repository | 2.798251 | 3 |
matchms/filtering/add_losses.py | maximskorik/matchms | 0 | 467 | import logging
import numpy
from ..Fragments import Fragments
from ..typing import SpectrumType
logger = logging.getLogger("matchms")
def add_losses(spectrum_in: SpectrumType, loss_mz_from=0.0, loss_mz_to=1000.0) -> SpectrumType:
"""Derive losses based on precursor mass.
Parameters
----------
spectrum_in:
Input spectrum.
loss_mz_from:
Minimum allowed m/z value for losses. Default is 0.0.
loss_mz_to:
Maximum allowed m/z value for losses. Default is 1000.0.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
precursor_mz = spectrum.get("precursor_mz", None)
if precursor_mz:
assert isinstance(precursor_mz, (float, int)), ("Expected 'precursor_mz' to be a scalar number.",
"Consider applying 'add_precursor_mz' filter first.")
peaks_mz, peaks_intensities = spectrum.peaks.mz, spectrum.peaks.intensities
losses_mz = (precursor_mz - peaks_mz)[::-1]
losses_intensities = peaks_intensities[::-1]
# Add losses which are within given boundaries
mask = numpy.where((losses_mz >= loss_mz_from)
& (losses_mz <= loss_mz_to))
spectrum.losses = Fragments(mz=losses_mz[mask],
intensities=losses_intensities[mask])
else:
logger.warning("No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")
return spectrum
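# Minimal usage sketch (added; assumes a matchms Spectrum whose "precursor_mz"
# metadata is already set, e.g. by the add_precursor_mz filter; numbers are made up):
# from matchms import Spectrum
# s = Spectrum(mz=numpy.array([100.0, 150.0, 200.0]),
#              intensities=numpy.array([0.7, 0.2, 0.1]),
#              metadata={"precursor_mz": 445.0})
# s = add_losses(s, loss_mz_from=10.0, loss_mz_to=400.0)
# s.losses.mz  # -> (precursor_mz - peak m/z) values within the given bounds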
| import logging
import numpy
from ..Fragments import Fragments
from ..typing import SpectrumType
logger = logging.getLogger("matchms")
def add_losses(spectrum_in: SpectrumType, loss_mz_from=0.0, loss_mz_to=1000.0) -> SpectrumType:
"""Derive losses based on precursor mass.
Parameters
----------
spectrum_in:
Input spectrum.
loss_mz_from:
Minimum allowed m/z value for losses. Default is 0.0.
loss_mz_to:
Maximum allowed m/z value for losses. Default is 1000.0.
"""
if spectrum_in is None:
return None
spectrum = spectrum_in.clone()
precursor_mz = spectrum.get("precursor_mz", None)
if precursor_mz:
assert isinstance(precursor_mz, (float, int)), ("Expected 'precursor_mz' to be a scalar number.",
"Consider applying 'add_precursor_mz' filter first.")
peaks_mz, peaks_intensities = spectrum.peaks.mz, spectrum.peaks.intensities
losses_mz = (precursor_mz - peaks_mz)[::-1]
losses_intensities = peaks_intensities[::-1]
# Add losses which are within given boundaries
mask = numpy.where((losses_mz >= loss_mz_from)
& (losses_mz <= loss_mz_to))
spectrum.losses = Fragments(mz=losses_mz[mask],
intensities=losses_intensities[mask])
else:
logger.warning("No precursor_mz found. Consider applying 'add_precursor_mz' filter first.")
return spectrum
| en | 0.813257 | Derive losses based on precursor mass. Parameters ---------- spectrum_in: Input spectrum. loss_mz_from: Minimum allowed m/z value for losses. Default is 0.0. loss_mz_to: Maximum allowed m/z value for losses. Default is 1000.0. # Add losses which are within given boundaries | 2.460241 | 2 |
cornflow_client/schema/dictSchema.py | baobabsoluciones/cornflow-client | 3 | 468 | <filename>cornflow_client/schema/dictSchema.py<gh_stars>1-10
import re
from .dict_functions import gen_schema, ParameterSchema, sort_dict
from cornflow_client.constants import JSON_TYPES, DATASCHEMA
class DictSchema:
"""
A json-schema to dict-schema parser
"""
def __init__(self, jsonschema):
"""
Class to manage internal dictionary schema
:param jsonschema: a json schema
"""
self.types = JSON_TYPES
schema_dict = self.get_empty_schema()
if "definitions" in jsonschema:
for item in jsonschema["definitions"].items():
self._get_element_dict(schema_dict=schema_dict, item=item)
if "properties" in jsonschema:
for item in jsonschema["properties"].items():
self._get_element_dict(schema_dict=schema_dict, item=item)
self._create_data_schema(
schema_dict=schema_dict,
item=item,
required_list=jsonschema.get("required"),
)
self.schema = schema_dict
def get_schema(self):
return self.schema
@staticmethod
def get_empty_schema():
"""
        Create an empty schema dict
"""
return {DATASCHEMA: []}
def _create_data_schema(self, schema_dict, item, required_list=None):
"""
Add a schema to schema_dict[DATASCHEMA]
:param item: (key, value) of a dict. The key contains the name of the schema
and the value contains its content.
return the schema dict.
"""
name, content = item
if required_list is None:
required_list = []
schema = dict(
name=name,
type=self._get_type_or_new_schema(item),
many=("type" in content and content["type"] == "array"),
required=name in required_list,
)
schema_dict[DATASCHEMA].append(schema)
return schema
def _get_element_dict(self, schema_dict, item, required_list=None):
"""
Parse an item (key, value) from the jsonschema and return the corresponding dict.
:param item: An item from the jsonschema (key, value)
:param required_list: A list of names corresponding to the required fields in the parent object
:return A dict element for a schema_dict.
"""
if required_list is None:
required_list = []
name, content = item
if "type" not in content:
if "$ref" in content:
return {
"name": name,
"type": self._get_ref(item),
"many": False,
"required": (name in required_list),
}
else:
print("\nType missing for item: {}".format(name))
raise TypeError("Type missing")
if content["type"] == "object":
return {
"name": name,
"type": self._get_object_schema(schema_dict=schema_dict, item=item),
"many": False,
"required": (name in required_list),
}
elif content["type"] == "array":
return {
"name": name,
"type": self._get_array_schema(schema_dict=schema_dict, item=item),
"many": True,
"required": (name in required_list),
}
else:
return self._get_field_dict(item, required_list)
def _get_object_schema(self, schema_dict, item):
"""
Transform an object item from the jsonschema in a dict for the schema_dict and update self.schema_dict.
In jsonschema objects are similar to python dict.
The object in jsonschema is in the following format:
"object_name": {"type":"object", "properties":{"field1": {...}, "filed2": {...}}, "required": ["field1]}
        The schema_dict object uses the format:
{"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False)}, ...]
:param item: The jsonschema item (key, value)
The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}})
:return: The schema name
"""
name, content = item
schema_name = self._get_new_schema_name(schema_dict=schema_dict, name=name)
ell = {
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content["properties"].items()
]
}
schema_dict.update(ell)
return schema_name
def _get_array_schema(self, schema_dict, item):
"""
        Transform an array item from the jsonschema in a dict for the schema_dict and update self.schema_dict.
In jsonschema arrays are similar to python lists.
The object in jsonschema is in the following format:
"object_name": {"type":"array", "items":{format_of_items}}
        The schema_dict object uses the format:
{"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False)
:param item: The jsonschema item (key, value)
The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}})
:return: The schema name
"""
name, content = item
content = content["items"]
schema_name = self._get_new_schema_name(schema_dict=schema_dict, name=name)
if "type" in content and content["type"] == "object":
schema_dict.update(
{
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content["properties"].items()
]
}
)
elif "$ref" in content:
schema_name = self._get_ref((None, content))
elif "type" in content and content["type"] != "array":
return self._get_type(content["type"])
else:
schema_dict.update(
{
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content.items()
]
}
)
return schema_name
def _get_field_dict(self, item, required_list=None):
"""
Transform a "normal" item from the jsonschema in a dict for the schema_dict and return it.
This is used for items that will directly translate into fields.
:param item: The jsonschema item in format (key, value)
:param required_list: a list of the fields required in the parent object.
:return: the schema_dict for this item
"""
d = dict(
name=item[0],
type=self._get_type(item[1]["type"]),
required=(item[0] in required_list),
allow_none=("null" in item[1]["type"]),
many=False,
)
return d
def _get_ref(self, item):
"""
Get the name of the schema for a jsonschema reference.
jsonschema definitions are parsed first and corresponding schema are created so a schema should exist
corresponding to the reference.
:param item: The jsonschema item in format (key, value)
The value should be in the following format: {"$ref": "#/definitions/object_name"}
:return The schema name (_get_schema_name(object_name))
"""
content = item[1]
ref = re.search("definitions/(.+)", content["$ref"]).group(1)
return self._get_schema_name(ref)
def _get_type_or_new_schema(self, item):
"""
returns a new schema or a type depending on the json_type
"""
name, content = item
if "type" not in content or content["type"] == "object":
return self._get_schema_name(name)
elif content["type"] == "array":
return self._get_type_or_new_schema((name, content["items"]))
else:
return self._get_type(content["type"])
def _get_type(self, json_type):
"""
Translate the type between jsonschema and schema_dict.
:param json_type: the type in jsonschema
:return: the type in schema_dict.
"""
if type(json_type) is list:
not_null_type = [i for i in json_type if i != "null"]
if len(not_null_type) > 1:
raise Warning("Warning: more than one type given")
return self.types[not_null_type[0]]
else:
return self.types[json_type]
@staticmethod
def _get_schema_name(name, n=0):
"""
Transform an element name into a schema name in order to create a schema corresponding to an object or array.
        The schema name uses the following format:
[name][n]Schema (for example if name is "values" and n is 3: Values3Schema)
:param name: The name of the object or array.
:param n: if n is different from 0, it is added to the schema name.
:return: the corresponding schema name.
"""
if n == 0:
return name.capitalize() + "Schema"
else:
return name.capitalize() + str(n) + "Schema"
def _get_new_schema_name(self, schema_dict, name, n=0):
try_name = self._get_schema_name(name, n)
if try_name in schema_dict:
return self._get_new_schema_name(
schema_dict=schema_dict, name=name, n=n + 1
)
else:
return try_name
@staticmethod
def _get_required(content):
"""
        Get the list of required names if it exists.
        :param content: the dict which may contain a "required" key/value.
        :return: The required list or an empty list.
"""
return content.get("required", [])
def to_marshmallow(self):
dict_params = self.schema
result_dict = {}
ordered = sort_dict(dict_params)
tuplist = sorted(dict_params.items(), key=lambda v: ordered[v[0]])
for key, params in tuplist:
schema = ParameterSchema()
# this line validates the list of parameters:
params1 = schema.load(params, many=True)
result_dict[key] = gen_schema(key, params1, result_dict)
return result_dict[DATASCHEMA]
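# Usage sketch (hypothetical input, added for illustration only):
# jsonschema = {
#     "type": "object",
#     "properties": {
#         "parameters": {"type": "object",
#                        "properties": {"objective": {"type": "string"}},
#                        "required": ["objective"]}
#     },
#     "required": ["parameters"],
# }
# ds = DictSchema(jsonschema)
# ds.get_schema()       # internal dict-schema: DATASCHEMA entry plus generated sub-schemas
# ds.to_marshmallow()   # marshmallow schema built from it, usable for validation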
| <filename>cornflow_client/schema/dictSchema.py<gh_stars>1-10
import re
from .dict_functions import gen_schema, ParameterSchema, sort_dict
from cornflow_client.constants import JSON_TYPES, DATASCHEMA
class DictSchema:
"""
A json-schema to dict-schema parser
"""
def __init__(self, jsonschema):
"""
Class to manage internal dictionary schema
:param jsonschema: a json schema
"""
self.types = JSON_TYPES
schema_dict = self.get_empty_schema()
if "definitions" in jsonschema:
for item in jsonschema["definitions"].items():
self._get_element_dict(schema_dict=schema_dict, item=item)
if "properties" in jsonschema:
for item in jsonschema["properties"].items():
self._get_element_dict(schema_dict=schema_dict, item=item)
self._create_data_schema(
schema_dict=schema_dict,
item=item,
required_list=jsonschema.get("required"),
)
self.schema = schema_dict
def get_schema(self):
return self.schema
@staticmethod
def get_empty_schema():
"""
        Create an empty schema dict
"""
return {DATASCHEMA: []}
def _create_data_schema(self, schema_dict, item, required_list=None):
"""
Add a schema to schema_dict[DATASCHEMA]
:param item: (key, value) of a dict. The key contains the name of the schema
and the value contains its content.
return the schema dict.
"""
name, content = item
if required_list is None:
required_list = []
schema = dict(
name=name,
type=self._get_type_or_new_schema(item),
many=("type" in content and content["type"] == "array"),
required=name in required_list,
)
schema_dict[DATASCHEMA].append(schema)
return schema
def _get_element_dict(self, schema_dict, item, required_list=None):
"""
Parse an item (key, value) from the jsonschema and return the corresponding dict.
:param item: An item from the jsonschema (key, value)
:param required_list: A list of names corresponding to the required fields in the parent object
:return A dict element for a schema_dict.
"""
if required_list is None:
required_list = []
name, content = item
if "type" not in content:
if "$ref" in content:
return {
"name": name,
"type": self._get_ref(item),
"many": False,
"required": (name in required_list),
}
else:
print("\nType missing for item: {}".format(name))
raise TypeError("Type missing")
if content["type"] == "object":
return {
"name": name,
"type": self._get_object_schema(schema_dict=schema_dict, item=item),
"many": False,
"required": (name in required_list),
}
elif content["type"] == "array":
return {
"name": name,
"type": self._get_array_schema(schema_dict=schema_dict, item=item),
"many": True,
"required": (name in required_list),
}
else:
return self._get_field_dict(item, required_list)
def _get_object_schema(self, schema_dict, item):
"""
Transform an object item from the jsonschema in a dict for the schema_dict and update self.schema_dict.
In jsonschema objects are similar to python dict.
The object in jsonschema is in the following format:
"object_name": {"type":"object", "properties":{"field1": {...}, "filed2": {...}}, "required": ["field1]}
        The schema_dict object uses the format:
{"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False)}, ...]
:param item: The jsonschema item (key, value)
The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}})
:return: The schema name
"""
name, content = item
schema_name = self._get_new_schema_name(schema_dict=schema_dict, name=name)
ell = {
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content["properties"].items()
]
}
schema_dict.update(ell)
return schema_name
def _get_array_schema(self, schema_dict, item):
"""
        Transform an array item from the jsonschema in a dict for the schema_dict and update self.schema_dict.
In jsonschema arrays are similar to python lists.
The object in jsonschema is in the following format:
"object_name": {"type":"array", "items":{format_of_items}}
        The schema_dict object uses the format:
{"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False)
:param item: The jsonschema item (key, value)
The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}})
:return: The schema name
"""
name, content = item
content = content["items"]
schema_name = self._get_new_schema_name(schema_dict=schema_dict, name=name)
if "type" in content and content["type"] == "object":
schema_dict.update(
{
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content["properties"].items()
]
}
)
elif "$ref" in content:
schema_name = self._get_ref((None, content))
elif "type" in content and content["type"] != "array":
return self._get_type(content["type"])
else:
schema_dict.update(
{
schema_name: [
self._get_element_dict(
schema_dict=schema_dict,
item=i,
required_list=self._get_required(content),
)
for i in content.items()
]
}
)
return schema_name
def _get_field_dict(self, item, required_list=None):
"""
Transform a "normal" item from the jsonschema in a dict for the schema_dict and return it.
This is used for items that will directly translate into fields.
:param item: The jsonschema item in format (key, value)
:param required_list: a list of the fields required in the parent object.
:return: the schema_dict for this item
"""
d = dict(
name=item[0],
type=self._get_type(item[1]["type"]),
required=(item[0] in required_list),
allow_none=("null" in item[1]["type"]),
many=False,
)
return d
def _get_ref(self, item):
"""
Get the name of the schema for a jsonschema reference.
jsonschema definitions are parsed first and corresponding schema are created so a schema should exist
corresponding to the reference.
:param item: The jsonschema item in format (key, value)
The value should be in the following format: {"$ref": "#/definitions/object_name"}
:return The schema name (_get_schema_name(object_name))
"""
content = item[1]
ref = re.search("definitions/(.+)", content["$ref"]).group(1)
return self._get_schema_name(ref)
def _get_type_or_new_schema(self, item):
"""
returns a new schema or a type depending on the json_type
"""
name, content = item
if "type" not in content or content["type"] == "object":
return self._get_schema_name(name)
elif content["type"] == "array":
return self._get_type_or_new_schema((name, content["items"]))
else:
return self._get_type(content["type"])
def _get_type(self, json_type):
"""
Translate the type between jsonschema and schema_dict.
:param json_type: the type in jsonschema
:return: the type in schema_dict.
"""
if type(json_type) is list:
not_null_type = [i for i in json_type if i != "null"]
if len(not_null_type) > 1:
raise Warning("Warning: more than one type given")
return self.types[not_null_type[0]]
else:
return self.types[json_type]
@staticmethod
def _get_schema_name(name, n=0):
"""
Transform an element name into a schema name in order to create a schema corresponding to an object or array.
        The schema name uses the following format:
[name][n]Schema (for example if name is "values" and n is 3: Values3Schema)
:param name: The name of the object or array.
:param n: if n is different from 0, it is added to the schema name.
:return: the corresponding schema name.
"""
if n == 0:
return name.capitalize() + "Schema"
else:
return name.capitalize() + str(n) + "Schema"
def _get_new_schema_name(self, schema_dict, name, n=0):
try_name = self._get_schema_name(name, n)
if try_name in schema_dict:
return self._get_new_schema_name(
schema_dict=schema_dict, name=name, n=n + 1
)
else:
return try_name
@staticmethod
def _get_required(content):
"""
        Get the list of required names if it exists.
        :param content: the dict which may contain a "required" key/value.
        :return: The required list or an empty list.
"""
return content.get("required", [])
def to_marshmallow(self):
dict_params = self.schema
result_dict = {}
ordered = sort_dict(dict_params)
tuplist = sorted(dict_params.items(), key=lambda v: ordered[v[0]])
for key, params in tuplist:
schema = ParameterSchema()
# this line validates the list of parameters:
params1 = schema.load(params, many=True)
result_dict[key] = gen_schema(key, params1, result_dict)
return result_dict[DATASCHEMA]
| en | 0.608015 | A json-schema to dict-schema parser Class to manage internal dictionary schema :param jsonschema: a json schema Create un empty schema dict Add a schema to schema_dict[DATASCHEMA] :param item: (key, value) of a dict. The key contains the name of the schema and the value contains its content. return the schema dict. Parse an item (key, value) from the jsonschema and return the corresponding dict. :param item: An item from the jsonschema (key, value) :param required_list: A list of names corresponding to the required fields in the parent object :return A dict element for a schema_dict. Transform an object item from the jsonschema in a dict for the schema_dict and update self.schema_dict. In jsonschema objects are similar to python dict. The object in jsonschema is in the following format: "object_name": {"type":"object", "properties":{"field1": {...}, "filed2": {...}}, "required": ["field1]} The schema_dict object use the format: {"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False)}, ...] :param item: The jsonschema item (key, value) The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}}) :return: The schema name Transform a array item from the jsonschema in a dict for the schema_dict and update self.schema_dict. In jsonschema arrays are similar to python lists. The object in jsonschema is in the following format: "object_name": {"type":"array", "items":{format_of_items}} The schema_dict object use the format: {"schema_name": [{"name":"field1", "type": "field1_type", "many": False, "required":(True or False) :param item: The jsonschema item (key, value) The format of the item is: ("object_name", {"type":"object", "properties":{"a": {...}, "b": {...}}) :return: The schema name Transform a "normal" item from the jsonschema in a dict for the schema_dict and return it. This is used for items that will directly translate into fields. :param item: The jsonschema item in format (key, value) :param required_list: a list of the fields required in the parent object. :return: the schema_dict for this item Get the name of the schema for a jsonschema reference. jsonschema definitions are parsed first and corresponding schema are created so a schema should exist corresponding to the reference. :param item: The jsonschema item in format (key, value) The value should be in the following format: {"$ref": "#/definitions/object_name"} :return The schema name (_get_schema_name(object_name)) returns a new schema or a type depending on the json_type Translate the type between jsonschema and schema_dict. :param json_type: the type in jsonschema :return: the type in schema_dict. Transform an element name into a schema name in order to create a schema corresponding to an object or array. The schema name use the following format: [name][n]Schema (for example if name is "values" and n is 3: Values3Schema) :param name: The name of the object or array. :param n: if n is different from 0, it is added to the schema name. :return: the corresponding schema name. Get the list of required name of it exist. :content: the dict which should have a "required" key.value :return: The required list or empty list. # this line validates the list of parameters: | 2.449196 | 2 |
rspub/util/test/test_resourcefilter.py | EHRI/rspub-core | 1 | 469 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import unittest
import rspub.util.resourcefilter as rf
def on_windows():
opsys = platform.system()
return opsys == "Windows"
class TestPredicates(unittest.TestCase):
def test_directory_pattern_filter_empty(self):
dpf = rf.directory_pattern_predicate() # should pass all strings
self.assertTrue(dpf(""))
self.assertTrue(dpf("."))
self.assertTrue(dpf("\n"))
self.assertTrue(dpf("foo"))
# rejects not string
self.assertFalse(dpf(None))
self.assertFalse(dpf(42))
self.assertFalse(dpf(self))
def test_directory_pattern_filter(self):
dpf = rf.directory_pattern_predicate("abc")
self.assertTrue(dpf("foo/babcd/bar/some.txt"))
self.assertTrue(dpf("/abc/bar/some.txt"))
self.assertTrue(dpf("/foo/bar/abc/some.txt"))
#
self.assertFalse(dpf("/foo/bar/baz/abc.txt"))
# ##
dpf = rf.directory_pattern_predicate("^/abc")
self.assertTrue(dpf("/abc/bar/some.txt"))
#
self.assertFalse(dpf("abc/bar/some.txt"))
# #
dpf = rf.directory_pattern_predicate("abc$")
self.assertTrue(dpf("foo/bar/abc/some.txt"))
#
self.assertFalse(dpf("abc/abc/bar/some.txt"))
self.assertFalse(dpf("abc/abc/bar/abc.abc"))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_directory_pattern_filter_windows(self):
dpf = rf.directory_pattern_predicate("abc")
self.assertTrue(dpf("foo/babcd/bar/some.txt"))
self.assertTrue(dpf("/abc/bar/some.txt"))
self.assertTrue(dpf("/foo/bar/abc/some.txt"))
self.assertTrue(dpf("foo\\babcd\\bar\\some.txt"))
self.assertTrue(dpf("c:\\abc\\bar\\some.txt"))
self.assertTrue(dpf("c:\\foo\\bar\\abc\\some.txt"))
#
self.assertFalse(dpf("/foo/bar/baz/abc.txt"))
self.assertFalse(dpf("c:\\foo\\bar\\baz\\abc.txt"))
# ##
dpf = rf.directory_pattern_predicate("^/abc")
self.assertTrue(dpf("/abc/bar/some.txt"))
#
self.assertFalse(dpf("abc/bar/some.txt"))
# #
dpf = rf.directory_pattern_predicate("^c:\\abc")
self.assertTrue(dpf("c:\\abc\\bar\\some.txt"))
#
self.assertFalse(dpf("abc\\bar\\some.txt"))
dpf = rf.directory_pattern_predicate("abc$")
self.assertTrue(dpf("foo/bar/abc/some.txt"))
self.assertTrue(dpf("foo\\bar\\abc\\some.txt"))
#
self.assertFalse(dpf("abc/abc/bar/some.txt"))
self.assertFalse(dpf("abc\\abc\\bar\\some.txt"))
self.assertFalse(dpf("abc/abc/bar/abc.abc"))
self.assertFalse(dpf("abc\\abc\\bar\\abc.abc"))
def test_last_modified_filter(self):
file_name = os.path.realpath(__file__)
lmaf = rf.last_modified_after_predicate()
self.assertTrue(lmaf(file_name))
lmaf = rf.last_modified_after_predicate(3000000000)
# valid until 2065-01-24 06:20:00
self.assertFalse(lmaf(file_name))
lmaf = rf.last_modified_after_predicate("2016-08-01")
self.assertTrue(lmaf(file_name))
def test_example(self):
import rspub.util.resourcefilter as rf
dir_ends_with_abc = rf.directory_pattern_predicate("abc$")
assert dir_ends_with_abc("/foo/bar/folder_abc/my_resource.txt")
assert not dir_ends_with_abc("/foo/bar/folder_def/my_resource.txt")
xml_file = rf.filename_pattern_predicate(".xml$")
assert xml_file("my_resource.xml")
assert not xml_file("my_resource.txt")
import rspub.util.gates as lf
xml_files_in_abc = lf.and_(dir_ends_with_abc, xml_file)
assert xml_files_in_abc("/foo/bar/folder_abc/my_resource.xml")
assert not xml_files_in_abc("/foo/bar/folder_abc/my_resource.txt")
assert not xml_files_in_abc("/foo/bar/folder_def/my_resource.xml")
recent = rf.last_modified_after_predicate("2016-08-01")
includes = [xml_files_in_abc]
excludes = [recent]
resource_gate = lf.gate(includes, excludes)
# print(type(resource_gate))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_example_windows(self):
import rspub.util.resourcefilter as rf
dir_ends_with_abc = rf.directory_pattern_predicate("abc$")
assert dir_ends_with_abc("/foo/bar/folder_abc/my_resource.txt")
assert not dir_ends_with_abc("/foo/bar/folder_def/my_resource.txt")
xml_file = rf.filename_pattern_predicate(".xml$")
assert xml_file("my_resource.xml")
assert not xml_file("my_resource.txt")
import rspub.util.gates as lf
xml_files_in_abc = lf.and_(dir_ends_with_abc, xml_file)
assert xml_files_in_abc("/foo/bar/folder_abc/my_resource.xml")
assert not xml_files_in_abc("/foo/bar/folder_abc/my_resource.txt")
assert not xml_files_in_abc("/foo/bar/folder_def/my_resource.xml")
assert xml_files_in_abc("c:\\foo\\bar\\folder_abc\\my_resource.xml")
assert not xml_files_in_abc("c:\\foo\\bar\\folder_abc\\my_resource.txt")
assert not xml_files_in_abc("c:\\foo\\bar\\folder_def\\my_resource.xml")
recent = rf.last_modified_after_predicate("2016-08-01")
includes = [xml_files_in_abc]
excludes = [recent]
resource_gate = lf.gate(includes, excludes)
# print(type(resource_gate))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_windows_to_unix(self):
path = os.path.expanduser("~")
dpf = rf.directory_pattern_predicate("^" + path)
self.assertTrue(dpf(os.path.join(path, "bla")))
dpf = rf.directory_pattern_predicate("^C:\\Users")
self.assertTrue(dpf(os.path.join(path, "bla")))
| #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import platform
import unittest
import rspub.util.resourcefilter as rf
def on_windows():
opsys = platform.system()
return opsys == "Windows"
class TestPredicates(unittest.TestCase):
def test_directory_pattern_filter_empty(self):
dpf = rf.directory_pattern_predicate() # should pass all strings
self.assertTrue(dpf(""))
self.assertTrue(dpf("."))
self.assertTrue(dpf("\n"))
self.assertTrue(dpf("foo"))
# rejects not string
self.assertFalse(dpf(None))
self.assertFalse(dpf(42))
self.assertFalse(dpf(self))
def test_directory_pattern_filter(self):
dpf = rf.directory_pattern_predicate("abc")
self.assertTrue(dpf("foo/babcd/bar/some.txt"))
self.assertTrue(dpf("/abc/bar/some.txt"))
self.assertTrue(dpf("/foo/bar/abc/some.txt"))
#
self.assertFalse(dpf("/foo/bar/baz/abc.txt"))
# ##
dpf = rf.directory_pattern_predicate("^/abc")
self.assertTrue(dpf("/abc/bar/some.txt"))
#
self.assertFalse(dpf("abc/bar/some.txt"))
# #
dpf = rf.directory_pattern_predicate("abc$")
self.assertTrue(dpf("foo/bar/abc/some.txt"))
#
self.assertFalse(dpf("abc/abc/bar/some.txt"))
self.assertFalse(dpf("abc/abc/bar/abc.abc"))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_directory_pattern_filter_windows(self):
dpf = rf.directory_pattern_predicate("abc")
self.assertTrue(dpf("foo/babcd/bar/some.txt"))
self.assertTrue(dpf("/abc/bar/some.txt"))
self.assertTrue(dpf("/foo/bar/abc/some.txt"))
self.assertTrue(dpf("foo\\babcd\\bar\\some.txt"))
self.assertTrue(dpf("c:\\abc\\bar\\some.txt"))
self.assertTrue(dpf("c:\\foo\\bar\\abc\\some.txt"))
#
self.assertFalse(dpf("/foo/bar/baz/abc.txt"))
self.assertFalse(dpf("c:\\foo\\bar\\baz\\abc.txt"))
# ##
dpf = rf.directory_pattern_predicate("^/abc")
self.assertTrue(dpf("/abc/bar/some.txt"))
#
self.assertFalse(dpf("abc/bar/some.txt"))
# #
dpf = rf.directory_pattern_predicate("^c:\\abc")
self.assertTrue(dpf("c:\\abc\\bar\\some.txt"))
#
self.assertFalse(dpf("abc\\bar\\some.txt"))
dpf = rf.directory_pattern_predicate("abc$")
self.assertTrue(dpf("foo/bar/abc/some.txt"))
self.assertTrue(dpf("foo\\bar\\abc\\some.txt"))
#
self.assertFalse(dpf("abc/abc/bar/some.txt"))
self.assertFalse(dpf("abc\\abc\\bar\\some.txt"))
self.assertFalse(dpf("abc/abc/bar/abc.abc"))
self.assertFalse(dpf("abc\\abc\\bar\\abc.abc"))
def test_last_modified_filter(self):
file_name = os.path.realpath(__file__)
lmaf = rf.last_modified_after_predicate()
self.assertTrue(lmaf(file_name))
lmaf = rf.last_modified_after_predicate(3000000000)
# valid until 2065-01-24 06:20:00
self.assertFalse(lmaf(file_name))
lmaf = rf.last_modified_after_predicate("2016-08-01")
self.assertTrue(lmaf(file_name))
def test_example(self):
import rspub.util.resourcefilter as rf
dir_ends_with_abc = rf.directory_pattern_predicate("abc$")
assert dir_ends_with_abc("/foo/bar/folder_abc/my_resource.txt")
assert not dir_ends_with_abc("/foo/bar/folder_def/my_resource.txt")
xml_file = rf.filename_pattern_predicate(".xml$")
assert xml_file("my_resource.xml")
assert not xml_file("my_resource.txt")
import rspub.util.gates as lf
xml_files_in_abc = lf.and_(dir_ends_with_abc, xml_file)
assert xml_files_in_abc("/foo/bar/folder_abc/my_resource.xml")
assert not xml_files_in_abc("/foo/bar/folder_abc/my_resource.txt")
assert not xml_files_in_abc("/foo/bar/folder_def/my_resource.xml")
recent = rf.last_modified_after_predicate("2016-08-01")
includes = [xml_files_in_abc]
excludes = [recent]
resource_gate = lf.gate(includes, excludes)
# print(type(resource_gate))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_example_windows(self):
import rspub.util.resourcefilter as rf
dir_ends_with_abc = rf.directory_pattern_predicate("abc$")
assert dir_ends_with_abc("/foo/bar/folder_abc/my_resource.txt")
assert not dir_ends_with_abc("/foo/bar/folder_def/my_resource.txt")
xml_file = rf.filename_pattern_predicate(".xml$")
assert xml_file("my_resource.xml")
assert not xml_file("my_resource.txt")
import rspub.util.gates as lf
xml_files_in_abc = lf.and_(dir_ends_with_abc, xml_file)
assert xml_files_in_abc("/foo/bar/folder_abc/my_resource.xml")
assert not xml_files_in_abc("/foo/bar/folder_abc/my_resource.txt")
assert not xml_files_in_abc("/foo/bar/folder_def/my_resource.xml")
assert xml_files_in_abc("c:\\foo\\bar\\folder_abc\\my_resource.xml")
assert not xml_files_in_abc("c:\\foo\\bar\\folder_abc\\my_resource.txt")
assert not xml_files_in_abc("c:\\foo\\bar\\folder_def\\my_resource.xml")
recent = rf.last_modified_after_predicate("2016-08-01")
includes = [xml_files_in_abc]
excludes = [recent]
resource_gate = lf.gate(includes, excludes)
# print(type(resource_gate))
@unittest.skipUnless(on_windows(), "Only tested on Windows.")
def test_windows_to_unix(self):
path = os.path.expanduser("~")
dpf = rf.directory_pattern_predicate("^" + path)
self.assertTrue(dpf(os.path.join(path, "bla")))
dpf = rf.directory_pattern_predicate("^C:\\Users")
self.assertTrue(dpf(os.path.join(path, "bla")))
| en | 0.468988 | #! /usr/bin/env python3 # -*- coding: utf-8 -*- # should pass all strings # rejects not string # # ## # # # # # # ## # # # # # # valid until 2065-01-24 06:20:00 # print(type(resource_gate)) # print(type(resource_gate)) | 2.451362 | 2 |
molecule/ubuntu/tests/test_grafana.py | fiaasco/grafana | 0 | 470 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_package(host):
""" check if packages are installed
"""
assert host.package('grafana').is_installed
def test_service(host):
""" Testing whether the service is running and enabled
"""
assert host.service('grafana-server').is_enabled
assert host.service('grafana-server').is_running
| import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_package(host):
""" check if packages are installed
"""
assert host.package('grafana').is_installed
def test_service(host):
""" Testing whether the service is running and enabled
"""
assert host.service('grafana-server').is_enabled
assert host.service('grafana-server').is_running
| en | 0.802849 | check if packages are installed Testing whether the service is running and enabled | 2.076632 | 2 |
heatsink.py | sww1235/heatsink-calc | 1 | 471 | <gh_stars>1-10
"""Class representations of heatsinks."""
import math
from scipy import constants as const
from materials import Aluminium_6063 as aluminium
class Heatsink:
"""
A Heatsink.
Extended by form factor subclasses
"""
def __init__(self, material, configuration):
"""Init material and configuration variables."""
self.material = material
self.configuration = configuration
class CylindricalAnnularFin(Heatsink):
"""Extend base heatsink class with a cylindrical annular fin heatsink."""
def __init__(self, material, finSpacing, finRadius,
finThickness, cylinderDiameter, numberOfFins,
ambAirTemp, maxJunctionTemp, maxSurfaceTemp):
"""
Init remainder of class variables.
NOTE: all models are based off of the finSpacing variable
NOTE: using the simplified model for calculation efficiency.
finSpacing : gap between adjacent fins
finRadius : radius of fin minus central support cylinder
(alternatively, fin depth)
finThickness : thickness of individual fin
cylinderDiameter: diameter of support cylinder
heatsinkLength : overall axial length of heatsink
overall diameter: outside diameter of heatsink including fins.
"""
self.finSpacing = finSpacing # in meters
self.finRadius = finRadius # in meters
self.finThickness = finThickness # in meters
self.cylinderDiameter = cylinderDiameter # in meters
        self.numberOfFins = numberOfFins
self.heatsinkLength = ((self.finThickness * self.numberOfFins)
+ ((self.numberOfFins - 1) * self.finSpacing))
self.overallDiameter = self.cylinderDiameter + (2 * finRadius)
self.ambAirTemp = ambAirTemp # degrees kelvin
self.maxJunctionTemp = maxJunctionTemp
self.maxSurfaceTemp = maxSurfaceTemp
"""
NOTE: in order to prevent ridiculously long variable names, all
Nusselt Numbers are abbreviated as follows:
nn = Nusselt Number
nn0 = Nusselt Number 0 (Diffusive Limit)
nnOut = Nusselt Number for outer surfaces
nnIn = Nusselt Number for inner surfaces
nnInT = Nusselt Number for the thin boundry layer of inner surface
nnInFD = Nusselt Number for fully developed regime inner surface
"""
# thermal diffusivity of air at atmospheric pressure at 25C
alpha = 22.39 * 10**(-6) # (meters^2) / seconds
# Volumetric coefficient of thermal expansion
beta = aluminium.expansionCoefficient # 1/kelvin
        heatsinkSurfaceTemp = maxSurfaceTemp  # TODO: refine; assumed worst case equal to max surface temp, kelvin
# at atmospheric pressure at 25C
kinematicViscosity = 15.52 * 10**(-6) # meter^2/second
deltaT = heatsinkSurfaceTemp - ambAirTemp # kelvin
hLoD = self.heatsinkLength / self.overallDiameter
        cdoD = self.cylinderDiameter / self.overallDiameter
oneChannelArea = (math.pi * (((self.overallDiameter**2
- self.cylinderDiameter**2) / 2)
+ (self.cylinderDiameter
* self.finSpacing)))
# area of circumscribed cylinder
areaCC = (math.pi * (((self.overallDiameter**2) / 2)
+ self.overallDiameter * self.heatsinkLength)) # meter^2
# inner surface area of heatsink
areaIn = (self.numberOfFins - 1) * oneChannelArea # meter^2
# outer surface area of heatsink
areaOut = (math.pi * (((self.overallDiameter**2) / 2)
+ (self.numberOfFins
* self.overallDiameter
* self.finThickness))) # meter^2
# overall area of heatsink
areaHS = areaIn + areaOut # meter^2
RayleighNbrFinSpacing = ((const.g
* beta
* deltaT
* self.finSpacing**4)
/ (kinematicViscosity
* alpha
* self.overallDiameter))
RayleighNbrOverallDiameter = ((const.g
* beta
* deltaT
* self.overallDiameter**3)
/ (kinematicViscosity * alpha))
if 0.1 <= hLoD <= 8:
self.nn0 = ((3.36 + (0.087 * hLoD))
* math.sqrt(areaCC)
* (self.finSpacing / areaHS)
)
if 0.1 <= (self.finThickness
* self.numberOfFins
/ self.overallDiameter) <= 8:
self.nnOut = ((0.499 - (0.026 * math.log(self.finThickness
* self.numberOfFins
/ self.overallDiameter)))
* math.pow(RayleighNbrFinSpacing, 0.25)
* (areaOut/areaHS)
)
if (0.1 <= cdoD <= 8) and (2.9 * 10**4
<= RayleighNbrOverallDiameter
<= 2.3 * 10**5):
nnInT = ((0.573-(0.184 * cdoD) + (0.0388 * cdoD**2))
* math.pow(RayleighNbrFinSpacing, 0.25))
nnInFD = (((0.0323
- (0.0517 * cdoD)
+ (0.11 * cdoD**2))
* math.pow(RayleighNbrFinSpacing, 0.25))
+ (0.0516 + (0.0154 * cdoD)
- (0.0433 * cdoD**2)
+ (0.0792 * cdoD**3)) * RayleighNbrFinSpacing)
n = 1
self.nnIn = (math.pow(math.pow(nnInT, -n)
+ math.pow(nnInFD, -n), (-1/n)
)
* (areaIn/areaHS)
)
self.nn = (self.nnIn + self.nnOut + self.nn0)
        super(CylindricalAnnularFin, self).__init__(material, self.__class__.__name__)
"""
Nusselt number = (Qconv * b) / (Ahs deltaT k)
Qconv = heat flow rate by convection (Watts)
b = finSpacing (meters)
Ahs = Area of heatsink (meter^2)
deltaT = temperature difference between surface temp of
heatsink and ambient air temp.
k = thermal conductivity of material (Watts / (meter kelvin))
"""
| """Class representations of heatsinks."""
import math
from scipy import constants as const
from materials import Aluminium_6063 as aluminium
class Heatsink:
"""
A Heatsink.
Extended by form factor subclasses
"""
def __init__(self, material, configuration):
"""Init material and configuration variables."""
self.material = material
self.configuration = configuration
class CylindricalAnnularFin(Heatsink):
"""Extend base heatsink class with a cylindrical annular fin heatsink."""
def __init__(self, material, finSpacing, finRadius,
finThickness, cylinderDiameter, numberOfFins,
ambAirTemp, maxJunctionTemp, maxSurfaceTemp):
"""
Init remainder of class variables.
NOTE: all models are based off of the finSpacing variable
NOTE: using the simplified model for calculation efficiency.
finSpacing : gap between adjacent fins
finRadius : radius of fin minus central support cylinder
(alternatively, fin depth)
finThickness : thickness of individual fin
cylinderDiameter: diameter of support cylinder
heatsinkLength : overall axial length of heatsink
overall diameter: outside diameter of heatsink including fins.
"""
self.finSpacing = finSpacing # in meters
self.finRadius = finRadius # in meters
self.finThickness = finThickness # in meters
self.cylinderDiameter = cylinderDiameter # in meters
        self.numberOfFins = numberOfFins
self.heatsinkLength = ((self.finThickness * self.numberOfFins)
+ ((self.numberOfFins - 1) * self.finSpacing))
self.overallDiameter = self.cylinderDiameter + (2 * finRadius)
self.ambAirTemp = ambAirTemp # degrees kelvin
self.maxJunctionTemp = maxJunctionTemp
self.maxSurfaceTemp = maxSurfaceTemp
"""
NOTE: in order to prevent ridiculously long variable names, all
Nusselt Numbers are abbreviated as follows:
nn = Nusselt Number
nn0 = Nusselt Number 0 (Diffusive Limit)
nnOut = Nusselt Number for outer surfaces
nnIn = Nusselt Number for inner surfaces
nnInT = Nusselt Number for the thin boundry layer of inner surface
nnInFD = Nusselt Number for fully developed regime inner surface
"""
# thermal diffusivity of air at atmospheric pressure at 25C
alpha = 22.39 * 10**(-6) # (meters^2) / seconds
# Volumetric coefficient of thermal expansion
beta = aluminium.expansionCoefficient # 1/kelvin
        heatsinkSurfaceTemp = maxSurfaceTemp  # TODO: refine; assumed worst case equal to max surface temp, kelvin
# at atmospheric pressure at 25C
kinematicViscosity = 15.52 * 10**(-6) # meter^2/second
deltaT = heatsinkSurfaceTemp - ambAirTemp # kelvin
hLoD = self.heatsinkLength / self.overallDiameter
        cdoD = self.cylinderDiameter / self.overallDiameter
oneChannelArea = (math.pi * (((self.overallDiameter**2
- self.cylinderDiameter**2) / 2)
+ (self.cylinderDiameter
* self.finSpacing)))
# area of circumscribed cylinder
areaCC = (math.pi * (((self.overallDiameter**2) / 2)
+ self.overallDiameter * self.heatsinkLength)) # meter^2
# inner surface area of heatsink
areaIn = (self.numberOfFins - 1) * oneChannelArea # meter^2
# outer surface area of heatsink
areaOut = (math.pi * (((self.overallDiameter**2) / 2)
+ (self.numberOfFins
* self.overallDiameter
* self.finThickness))) # meter^2
# overall area of heatsink
areaHS = areaIn + areaOut # meter^2
RayleighNbrFinSpacing = ((const.g
* beta
* deltaT
* self.finSpacing**4)
/ (kinematicViscosity
* alpha
* self.overallDiameter))
RayleighNbrOverallDiameter = ((const.g
* beta
* deltaT
* self.overallDiameter**3)
/ (kinematicViscosity * alpha))
if 0.1 <= hLoD <= 8:
self.nn0 = ((3.36 + (0.087 * hLoD))
* math.sqrt(areaCC)
* (self.finSpacing / areaHS)
)
if 0.1 <= (self.finThickness
* self.numberOfFins
/ self.overallDiameter) <= 8:
self.nnOut = ((0.499 - (0.026 * math.log(self.finThickness
* self.numberOfFins
/ self.overallDiameter)))
* math.pow(RayleighNbrFinSpacing, 0.25)
* (areaOut/areaHS)
)
        if (0.1 <= cDoD <= 8) and (2.9 * 10**4
                                   <= RayleighNbrOverallDiameter
                                   <= 2.3 * 10**5):
            nnInT = ((0.573 - (0.184 * cDoD) + (0.0388 * cDoD**2))
                     * math.pow(RayleighNbrFinSpacing, 0.25))
            nnInFD = (((0.0323
                        - (0.0517 * cDoD)
                        + (0.11 * cDoD**2))
                       * math.pow(RayleighNbrFinSpacing, 0.25))
                      + (0.0516 + (0.0154 * cDoD)
                         - (0.0433 * cDoD**2)
                         + (0.0792 * cDoD**3)) * RayleighNbrFinSpacing)
n = 1
self.nnIn = (math.pow(math.pow(nnInT, -n)
+ math.pow(nnInFD, -n), (-1/n)
)
* (areaIn/areaHS)
)
self.nn = (self.nnIn + self.nnOut + self.nn0)
        # register material and the configuration label with the base Heatsink class
        super(CylindricalAnnularFin, self).__init__(material,
                                                    self.__class__.__name__)
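        # Illustrative follow-on (sketch, not part of the original file):
        # rearranging the relation described below, Nu = (Qconv * b) / (Ahs * dT * k),
        # gives an estimate of the convective heat flow for an assumed air
        # conductivity kAir (the 0.026 W/(m K) value at ~25C is an assumption):
        # kAir = 0.026
        # qConv = self.nn * areaHS * deltaT * kAir / self.finSpacing  # watts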
"""
Nusselt number = (Qconv * b) / (Ahs deltaT k)
Qconv = heat flow rate by convection (Watts)
b = finSpacing (meters)
Ahs = Area of heatsink (meter^2)
deltaT = temperature difference between surface temp of
heatsink and ambient air temp.
k = thermal conductivity of material (Watts / (meter kelvin))
""" | en | 0.795396 | Class representations of heatsinks. A Heatsink. Extended by form factor subclasses Init material and configuration variables. Extend base heatsink class with a cylindrical annular fin heatsink. Init remainder of class variables. NOTE: all models are based off of the finSpacing variable NOTE: using the simplified model for calculation efficiency. finSpacing : gap between adjacent fins finRadius : radius of fin minus central support cylinder (alternatively, fin depth) finThickness : thickness of individual fin cylinderDiameter: diameter of support cylinder heatsinkLength : overall axial length of heatsink overall diameter: outside diameter of heatsink including fins. # in meters # in meters # in meters # in meters # degrees kelvin NOTE: in order to prevent ridiculously long variable names, all Nusselt Numbers are abbreviated as follows: nn = Nusselt Number nn0 = Nusselt Number 0 (Diffusive Limit) nnOut = Nusselt Number for outer surfaces nnIn = Nusselt Number for inner surfaces nnInT = Nusselt Number for the thin boundry layer of inner surface nnInFD = Nusselt Number for fully developed regime inner surface # thermal diffusivity of air at atmospheric pressure at 25C # (meters^2) / seconds # Volumetric coefficient of thermal expansion # 1/kelvin # TODO kelvin # at atmospheric pressure at 25C # meter^2/second # kelvin # area of circumscribed cylinder # meter^2 # inner surface area of heatsink # meter^2 # outer surface area of heatsink # meter^2 # overall area of heatsink # meter^2 Nusselt number = (Qconv * b) / (Ahs deltaT k) Qconv = heat flow rate by convection (Watts) b = finSpacing (meters) Ahs = Area of heatsink (meter^2) deltaT = temperature difference between surface temp of heatsink and ambient air temp. k = thermal conductivity of material (Watts / (meter kelvin)) | 3.312631 | 3 |
scripts/get_file_name_as_variable.py | amin-henteti/airflow-dags | 0 | 472 | <filename>scripts/get_file_name_as_variable.py<gh_stars>0
import inspect
def foo():
print(inspect.stack()[0][3])
foo() | <filename>scripts/get_file_name_as_variable.py<gh_stars>0
import inspect
def foo():
print(inspect.stack()[0][3])
foo() | none | 1 | 1.992051 | 2 |
|
sovrin/test/did/helper.py | sovrin-foundation/old-sovrin | 3 | 473 | <reponame>sovrin-foundation/old-sovrin
import base58
from plenum.common.signer_did import DidSigner
from plenum.common.verifier import DidVerifier
from plenum.common.eventually import eventually
from plenum.test.helper import assertEquality
from sovrin.common.identity import Identity
MsgForSigning = {'sender': 'Mario', 'msg': 'Lorem ipsum'}
def signMsg(wallet, idr):
return wallet.signMsg(MsgForSigning, identifier=idr)
def verifyMsg(verifier, sig):
sig = base58.b58decode(sig)
return verifier.verifyMsg(sig, MsgForSigning)
def chkVerifyForRetrievedIdentity(signerWallet, verifierWallet, idr):
sig = signMsg(signerWallet, idr)
verkey = verifierWallet.getIdentity(idr).verkey
assert verifyMsg(DidVerifier(verkey, idr), sig)
def updateWalletIdrWithFullKeySigner(wallet, idr):
newSigner = DidSigner(identifier=idr)
wallet.updateSigner(idr, newSigner)
assertEquality(newSigner.verkey, wallet.getVerkey(idr))
checkFullVerkeySize(wallet.getVerkey(idr))
return newSigner.verkey
def updateSovrinIdrWithFullKey(looper, senderWallet, senderClient, ownerWallet,
idr, fullKey):
idy = Identity(identifier=idr, verkey=fullKey)
senderWallet.updateSponsoredIdentity(idy)
# TODO: What if the request fails, there must be some rollback mechanism
assert senderWallet.getSponsoredIdentity(idr).seqNo is None
reqs = senderWallet.preparePending()
senderClient.submitReqs(*reqs)
def chk():
assert senderWallet.getSponsoredIdentity(idr).seqNo is not None
looper.run(eventually(chk, retryWait=1, timeout=5))
return ownerWallet
def fetchFullVerkeyFromSovrin(looper, senderWallet, senderClient, ownerWallet,
idr):
identity = Identity(identifier=idr)
req = senderWallet.requestIdentity(identity, sender=senderWallet.defaultId)
senderClient.submitReqs(req)
def chk():
retrievedVerkey = senderWallet.getIdentity(idr).verkey
assertEquality(retrievedVerkey, ownerWallet.getVerkey(idr))
checkFullVerkeySize(retrievedVerkey)
looper.run(eventually(chk, retryWait=1, timeout=5))
def checkDidSize(did):
# A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes,
# since the did takes first 16 bytes, base58 of did is either
# 21 or 22 characters
assert len(did) == 21 or len(did) == 22
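# Worked size arithmetic (illustrative sketch, not from the original file):
# base58 carries roughly log2(58) ~ 5.86 bits per character, so a 16-byte DID
# needs about 128 / 5.86 ~ 21.8 characters, i.e. 21 or 22, while a full
# 32-byte verkey needs about 256 / 5.86 ~ 43.7, i.e. 43 or 44 characters.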
def checkAbbrVerkeySize(verkey):
# A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes,
# since the abbreviated verkey takes last 16 bytes, base58 of abbreviated
# verkey is either 21 or 22 characters and since its prefixed by a `~` its
# length will be either 23 or 22
assert len(verkey) == 23 or len(verkey) == 22
def checkFullVerkeySize(verkey):
# A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes.
assert len(verkey) == 44 or len(verkey) == 43
| import base58
from plenum.common.signer_did import DidSigner
from plenum.common.verifier import DidVerifier
from plenum.common.eventually import eventually
from plenum.test.helper import assertEquality
from sovrin.common.identity import Identity
MsgForSigning = {'sender': 'Mario', 'msg': 'Lorem ipsum'}
def signMsg(wallet, idr):
return wallet.signMsg(MsgForSigning, identifier=idr)
def verifyMsg(verifier, sig):
sig = base58.b58decode(sig)
return verifier.verifyMsg(sig, MsgForSigning)
def chkVerifyForRetrievedIdentity(signerWallet, verifierWallet, idr):
sig = signMsg(signerWallet, idr)
verkey = verifierWallet.getIdentity(idr).verkey
assert verifyMsg(DidVerifier(verkey, idr), sig)
def updateWalletIdrWithFullKeySigner(wallet, idr):
newSigner = DidSigner(identifier=idr)
wallet.updateSigner(idr, newSigner)
assertEquality(newSigner.verkey, wallet.getVerkey(idr))
checkFullVerkeySize(wallet.getVerkey(idr))
return newSigner.verkey
def updateSovrinIdrWithFullKey(looper, senderWallet, senderClient, ownerWallet,
idr, fullKey):
idy = Identity(identifier=idr, verkey=fullKey)
senderWallet.updateSponsoredIdentity(idy)
# TODO: What if the request fails, there must be some rollback mechanism
assert senderWallet.getSponsoredIdentity(idr).seqNo is None
reqs = senderWallet.preparePending()
senderClient.submitReqs(*reqs)
def chk():
assert senderWallet.getSponsoredIdentity(idr).seqNo is not None
looper.run(eventually(chk, retryWait=1, timeout=5))
return ownerWallet
def fetchFullVerkeyFromSovrin(looper, senderWallet, senderClient, ownerWallet,
idr):
identity = Identity(identifier=idr)
req = senderWallet.requestIdentity(identity, sender=senderWallet.defaultId)
senderClient.submitReqs(req)
def chk():
retrievedVerkey = senderWallet.getIdentity(idr).verkey
assertEquality(retrievedVerkey, ownerWallet.getVerkey(idr))
checkFullVerkeySize(retrievedVerkey)
looper.run(eventually(chk, retryWait=1, timeout=5))
def checkDidSize(did):
# A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes,
# since the did takes first 16 bytes, base58 of did is either
# 21 or 22 characters
assert len(did) == 21 or len(did) == 22
def checkAbbrVerkeySize(verkey):
# A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes,
# since the abbreviated verkey takes last 16 bytes, base58 of abbreviated
# verkey is either 21 or 22 characters and since its prefixed by a `~` its
# length will be either 23 or 22
assert len(verkey) == 23 or len(verkey) == 22
def checkFullVerkeySize(verkey):
# A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes.
assert len(verkey) == 44 or len(verkey) == 43 | en | 0.823122 | # TODO: What if the request fails, there must be some rollback mechanism # A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes, # since the did takes first 16 bytes, base58 of did is either # 21 or 22 characters # A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes, # since the abbreviated verkey takes last 16 bytes, base58 of abbreviated # verkey is either 21 or 22 characters and since its prefixed by a `~` its # length will be either 23 or 22 # A base58 encoding of 32 bytes string can be either 44 bytes or 43 bytes. | 2.154042 | 2 |
tests/test_EdiblesSpectrum.py | jancami/edibles | 8 | 474 | <gh_stars>1-10
import astropy
import datetime
import numpy as np
from edibles.utils.edibles_spectrum import EdiblesSpectrum
def testEdiblesSpectrum(filename="tests/HD170740_w860_redl_20140915_O12.fits"):
# Spectrum information
sp = EdiblesSpectrum(filename=filename, fully_featured=True, noDATADIR=True)
assert isinstance(sp.header, astropy.io.fits.header.Header)
assert isinstance(sp.target, str)
assert isinstance(sp.date, str)
assert isinstance(sp.datetime, datetime.datetime)
assert isinstance(sp.v_bary, float)
assert isinstance(sp.wave_units, str)
assert isinstance(sp.flux_units, str)
# Raw
assert isinstance(sp.raw_wave, np.ndarray)
assert isinstance(sp.raw_bary_wave, np.ndarray)
assert isinstance(sp.raw_flux, np.ndarray)
assert len(sp.raw_wave) == len(sp.raw_bary_wave)
assert len(sp.raw_wave) == len(sp.raw_flux)
assert isinstance(sp.raw_grid, np.ndarray)
assert len(sp.raw_grid) == 200443 # print(len(sp.raw_grid))
assert isinstance(sp.raw_sky_wave, np.ndarray)
assert isinstance(sp.raw_sky_flux, np.ndarray)
assert len(sp.raw_sky_wave) == len(sp.raw_sky_flux)
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
# getSpectrum
xmin = 7660
xmax = 7680
sp.getSpectrum(xmin=xmin, xmax=xmax)
assert xmin == sp.xmin
assert xmax == sp.xmax
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
assert len(sp.wave) == len(sp.flux)
assert np.min(sp.wave) > sp.xmin
assert np.max(sp.wave) < sp.xmax
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.bary_flux, np.ndarray)
assert len(sp.bary_wave) == len(sp.bary_flux)
assert np.min(sp.bary_wave) > sp.xmin
assert np.max(sp.bary_wave) < sp.xmax
assert isinstance(sp.grid, np.ndarray)
assert isinstance(sp.interp_flux, np.ndarray)
assert isinstance(sp.interp_bary_flux, np.ndarray)
assert len(sp.grid) == len(sp.interp_flux)
assert len(sp.grid) == len(sp.interp_bary_flux)
assert np.min(sp.grid) > sp.xmin
assert np.max(sp.grid) < sp.xmax
assert isinstance(sp.sky_wave, np.ndarray)
assert isinstance(sp.sky_flux, np.ndarray)
assert len(sp.sky_wave) == len(sp.sky_flux)
assert np.min(sp.sky_wave) > sp.xmin
assert np.max(sp.sky_wave) < sp.xmax
# shift
zoom_xmin = 7661
zoom_xmax = 7679
shift = 0.05
sp.shift(shift=shift, zoom_xmin=zoom_xmin, zoom_xmax=zoom_xmax)
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
assert len(sp.wave) == len(sp.flux)
assert np.min(sp.wave) > sp.xmin
assert np.max(sp.wave) < sp.xmax
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.bary_flux, np.ndarray)
assert len(sp.bary_wave) == len(sp.bary_flux)
assert np.min(sp.bary_wave) > sp.xmin
assert np.max(sp.bary_wave) < sp.xmax
assert isinstance(sp.grid, np.ndarray)
assert isinstance(sp.interp_flux, np.ndarray)
assert isinstance(sp.interp_bary_flux, np.ndarray)
assert len(sp.grid) == len(sp.interp_flux)
assert len(sp.grid) == len(sp.interp_bary_flux)
assert np.min(sp.grid) > sp.xmin
assert np.max(sp.grid) < sp.xmax
assert isinstance(sp.sky_wave, np.ndarray)
assert isinstance(sp.sky_flux, np.ndarray)
assert len(sp.sky_wave) == len(sp.sky_flux)
assert np.min(sp.sky_wave) > sp.xmin
assert np.max(sp.sky_wave) < sp.xmax
if __name__ == "__main__":
filename = "HD170740_w860_redl_20140915_O12.fits"
testEdiblesSpectrum(filename=filename)
| import astropy
import datetime
import numpy as np
from edibles.utils.edibles_spectrum import EdiblesSpectrum
def testEdiblesSpectrum(filename="tests/HD170740_w860_redl_20140915_O12.fits"):
# Spectrum information
sp = EdiblesSpectrum(filename=filename, fully_featured=True, noDATADIR=True)
assert isinstance(sp.header, astropy.io.fits.header.Header)
assert isinstance(sp.target, str)
assert isinstance(sp.date, str)
assert isinstance(sp.datetime, datetime.datetime)
assert isinstance(sp.v_bary, float)
assert isinstance(sp.wave_units, str)
assert isinstance(sp.flux_units, str)
# Raw
assert isinstance(sp.raw_wave, np.ndarray)
assert isinstance(sp.raw_bary_wave, np.ndarray)
assert isinstance(sp.raw_flux, np.ndarray)
assert len(sp.raw_wave) == len(sp.raw_bary_wave)
assert len(sp.raw_wave) == len(sp.raw_flux)
assert isinstance(sp.raw_grid, np.ndarray)
assert len(sp.raw_grid) == 200443 # print(len(sp.raw_grid))
assert isinstance(sp.raw_sky_wave, np.ndarray)
assert isinstance(sp.raw_sky_flux, np.ndarray)
assert len(sp.raw_sky_wave) == len(sp.raw_sky_flux)
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
# getSpectrum
xmin = 7660
xmax = 7680
sp.getSpectrum(xmin=xmin, xmax=xmax)
assert xmin == sp.xmin
assert xmax == sp.xmax
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
assert len(sp.wave) == len(sp.flux)
assert np.min(sp.wave) > sp.xmin
assert np.max(sp.wave) < sp.xmax
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.bary_flux, np.ndarray)
assert len(sp.bary_wave) == len(sp.bary_flux)
assert np.min(sp.bary_wave) > sp.xmin
assert np.max(sp.bary_wave) < sp.xmax
assert isinstance(sp.grid, np.ndarray)
assert isinstance(sp.interp_flux, np.ndarray)
assert isinstance(sp.interp_bary_flux, np.ndarray)
assert len(sp.grid) == len(sp.interp_flux)
assert len(sp.grid) == len(sp.interp_bary_flux)
assert np.min(sp.grid) > sp.xmin
assert np.max(sp.grid) < sp.xmax
assert isinstance(sp.sky_wave, np.ndarray)
assert isinstance(sp.sky_flux, np.ndarray)
assert len(sp.sky_wave) == len(sp.sky_flux)
assert np.min(sp.sky_wave) > sp.xmin
assert np.max(sp.sky_wave) < sp.xmax
# shift
zoom_xmin = 7661
zoom_xmax = 7679
shift = 0.05
sp.shift(shift=shift, zoom_xmin=zoom_xmin, zoom_xmax=zoom_xmax)
assert isinstance(sp.wave, np.ndarray)
assert isinstance(sp.flux, np.ndarray)
assert len(sp.wave) == len(sp.flux)
assert np.min(sp.wave) > sp.xmin
assert np.max(sp.wave) < sp.xmax
assert isinstance(sp.bary_wave, np.ndarray)
assert isinstance(sp.bary_flux, np.ndarray)
assert len(sp.bary_wave) == len(sp.bary_flux)
assert np.min(sp.bary_wave) > sp.xmin
assert np.max(sp.bary_wave) < sp.xmax
assert isinstance(sp.grid, np.ndarray)
assert isinstance(sp.interp_flux, np.ndarray)
assert isinstance(sp.interp_bary_flux, np.ndarray)
assert len(sp.grid) == len(sp.interp_flux)
assert len(sp.grid) == len(sp.interp_bary_flux)
assert np.min(sp.grid) > sp.xmin
assert np.max(sp.grid) < sp.xmax
assert isinstance(sp.sky_wave, np.ndarray)
assert isinstance(sp.sky_flux, np.ndarray)
assert len(sp.sky_wave) == len(sp.sky_flux)
assert np.min(sp.sky_wave) > sp.xmin
assert np.max(sp.sky_wave) < sp.xmax
if __name__ == "__main__":
filename = "HD170740_w860_redl_20140915_O12.fits"
testEdiblesSpectrum(filename=filename) | en | 0.11399 | # Spectrum information # Raw # print(len(sp.raw_grid)) # getSpectrum # shift | 2.046045 | 2 |
swift/common/ondisk.py | citrix-openstack-build/swift | 1 | 475 | <reponame>citrix-openstack-build/swift
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods & Attributes for shared 'on-disk' data layouts."""
import os
import sys
import errno
from hashlib import md5
from random import shuffle
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift import gettext_ as _
from swift.common.utils import listdir, quote
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
_hash_conf = ConfigParser()
HASH_PATH_SUFFIX = ''
HASH_PATH_PREFIX = ''
if _hash_conf.read('/etc/swift/swift.conf'):
try:
HASH_PATH_SUFFIX = _hash_conf.get('swift-hash',
'swift_hash_path_suffix')
except (NoSectionError, NoOptionError):
pass
try:
HASH_PATH_PREFIX = _hash_conf.get('swift-hash',
'swift_hash_path_prefix')
except (NoSectionError, NoOptionError):
pass
def validate_configuration():
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
sys.exit("Error: [swift-hash]: both swift_hash_path_suffix "
"and swift_hash_path_prefix are missing "
"from /etc/swift/swift.conf")
def hash_path(account, container=None, object=None, raw_digest=False):
"""
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string
"""
if object and not container:
raise ValueError('container is required if object is provided')
paths = [account]
if container:
paths.append(container)
if object:
paths.append(object)
if raw_digest:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).digest()
else:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).hexdigest()
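# Illustrative call (sketch, not part of the original module): with account,
# container and object names 'a', 'c' and 'o', hash_path('a', 'c', 'o')
# returns md5(HASH_PATH_PREFIX + '/a/c/o' + HASH_PATH_SUFFIX).hexdigest().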
def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return "%016.05f" % (float(timestamp))
def validate_device_partition(device, partition):
"""
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises: ValueError if given an invalid device or partition
"""
invalid_device = False
invalid_partition = False
if not device or '/' in device or device in ['.', '..']:
invalid_device = True
if not partition or '/' in partition or partition in ['.', '..']:
invalid_partition = True
if invalid_device:
raise ValueError('Invalid device: %s' % quote(device or ''))
elif invalid_partition:
raise ValueError('Invalid partition: %s' % quote(partition or ''))
def storage_directory(datadir, partition, name_hash):
"""
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory
"""
return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
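# Illustrative result (sketch): storage_directory('objects', 1234,
# 'd41d8cd98f00b204e9800998ecf8427e') yields
# 'objects/1234/27e/d41d8cd98f00b204e9800998ecf8427e', i.e. the last three
# characters of the hash form the intermediate directory.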
def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None):
'''
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
'''
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
for device in device_dir:
if mount_check and not \
os.path.ismount(os.path.join(devices, device)):
if logger:
logger.debug(
_('Skipping %s as it is not mounted'), device)
continue
datadir_path = os.path.join(devices, device, datadir)
partitions = listdir(datadir_path)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for fname in files:
if suffix and not fname.endswith(suffix):
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
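# Illustrative use of the generator above (sketch; the '/srv/node' devices
# path, 'objects' datadir and '.data' suffix are assumptions, not values
# taken from this module):
#   for path, device, partition in audit_location_generator(
#           '/srv/node', 'objects', suffix='.data', mount_check=False):
#       pass  # each yielded path is one file under the hashed layout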
| # Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods & Attributes for shared 'on-disk' data layouts."""
import os
import sys
import errno
from hashlib import md5
from random import shuffle
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift import gettext_ as _
from swift.common.utils import listdir, quote
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
_hash_conf = ConfigParser()
HASH_PATH_SUFFIX = ''
HASH_PATH_PREFIX = ''
if _hash_conf.read('/etc/swift/swift.conf'):
try:
HASH_PATH_SUFFIX = _hash_conf.get('swift-hash',
'swift_hash_path_suffix')
except (NoSectionError, NoOptionError):
pass
try:
HASH_PATH_PREFIX = _hash_conf.get('swift-hash',
'swift_hash_path_prefix')
except (NoSectionError, NoOptionError):
pass
def validate_configuration():
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
sys.exit("Error: [swift-hash]: both swift_hash_path_suffix "
"and swift_hash_path_prefix are missing "
"from /etc/swift/swift.conf")
def hash_path(account, container=None, object=None, raw_digest=False):
"""
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string
"""
if object and not container:
raise ValueError('container is required if object is provided')
paths = [account]
if container:
paths.append(container)
if object:
paths.append(object)
if raw_digest:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).digest()
else:
return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths)
+ HASH_PATH_SUFFIX).hexdigest()
def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return "%016.05f" % (float(timestamp))
def validate_device_partition(device, partition):
"""
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises: ValueError if given an invalid device or partition
"""
invalid_device = False
invalid_partition = False
if not device or '/' in device or device in ['.', '..']:
invalid_device = True
if not partition or '/' in partition or partition in ['.', '..']:
invalid_partition = True
if invalid_device:
raise ValueError('Invalid device: %s' % quote(device or ''))
elif invalid_partition:
raise ValueError('Invalid partition: %s' % quote(partition or ''))
def storage_directory(datadir, partition, name_hash):
"""
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory
"""
return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None):
'''
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
'''
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
for device in device_dir:
if mount_check and not \
os.path.ismount(os.path.join(devices, device)):
if logger:
logger.debug(
_('Skipping %s as it is not mounted'), device)
continue
datadir_path = os.path.join(devices, device, datadir)
partitions = listdir(datadir_path)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for fname in files:
if suffix and not fname.endswith(suffix):
continue
path = os.path.join(hash_path, fname)
yield path, device, partition | en | 0.810318 | # Copyright (c) 2010-2013 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. Methods & Attributes for shared 'on-disk' data layouts. # Used by hash_path to offer a bit more security when generating hashes for # paths. It simply appends this value to all paths; guessing the hash a path # will end up with would also require knowing this suffix. Get the canonical hash for an account/container/object :param account: Account :param container: Container :param object: Object :param raw_digest: If True, return the raw version rather than a hex digest :returns: hash string Format a timestamp (string or numeric) into a standardized xxxxxxxxxx.xxxxx (10.5) format. Note that timestamps using values greater than or equal to November 20th, 2286 at 17:46 UTC will use 11 digits to represent the number of seconds. :param timestamp: unix timestamp :returns: normalized timestamp as a string Validate that a device and a partition are valid and won't lead to directory traversal when used. :param device: device to validate :param partition: partition to validate :raises: ValueError if given an invalid device or partition Get the storage directory :param datadir: Base data directory :param partition: Partition :param name_hash: Account, container or object name hash :returns: Storage directory Given a devices path and a data directory, yield (path, device, partition) for all files in that directory :param devices: parent directory of the devices to be audited :param datadir: a directory located under self.devices. This should be one of the DATADIR constants defined in the account, container, and object servers. :param suffix: path name suffix required for all names returned :param mount_check: Flag to check if a mount check should be performed on devices :param logger: a logger object # randomize devices in case of process restart before sweep completed | 2.169858 | 2 |
.OLD_FILES/dossiers2_old1/custom/cache.py | KIHestad/WoT-Dossier-Parser-Create-Struct | 0 | 476 | # uncompyle6 version 2.11.3
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)]
# Embedded file name: scripts/common/dossiers2/custom/cache.py
import nations
from items import vehicles
def getCache():
global _g_cache
return _g_cache
def buildCache():
vehiclesByLevel = {}
vehiclesByTag = {'beast': set(),'sinai': set(),'patton': set()}
vehiclesInTreeByNation = {}
vehiclesInTree = set()
nationsWithVehiclesInTree = []
unlocksSources = vehicles.getUnlocksSources()
for nationIdx in xrange(len(nations.NAMES)):
nationList = vehicles.g_list.getList(nationIdx)
vehiclesInNationTree = set()
for vehDescr in nationList.itervalues():
vehiclesByLevel.setdefault(vehDescr.level, set()).add(vehDescr.compactDescr)
for tag in ('beast', 'sinai', 'patton'):
if tag in vehDescr.tags:
vehiclesByTag[tag].add(vehDescr.compactDescr)
if len(unlocksSources.get(vehDescr.compactDescr, set())) > 0 or len(vehicles.g_cache.vehicle(nationIdx, vehDescr.id).unlocksDescrs) > 0:
vehiclesInNationTree.add(vehDescr.compactDescr)
vehiclesInTree.update(vehiclesInNationTree)
vehiclesInTreeByNation[nationIdx] = vehiclesInNationTree
if bool(vehiclesInNationTree):
nationsWithVehiclesInTree.append(nationIdx)
vehicles8p = vehiclesByLevel[8] | vehiclesByLevel[9] | vehiclesByLevel[10]
_g_cache.update({'vehiclesByLevel': vehiclesByLevel,
'vehicles8+': vehicles8p,
'vehiclesByTag': vehiclesByTag,
'mausTypeCompDescr': vehicles.makeVehicleTypeCompDescrByName('germany:G42_Maus'),
'vehiclesInTreesByNation': vehiclesInTreeByNation,
'vehiclesInTrees': vehiclesInTree,
'nationsWithVehiclesInTree': nationsWithVehiclesInTree
})
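# Illustrative lookup once buildCache() has run (sketch, not original code):
#   buildCache()
#   tier8plus = getCache()['vehicles8+']  # compact descriptors of tier 8-10 vehicles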
_g_cache = {} | # uncompyle6 version 2.11.3
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)]
# Embedded file name: scripts/common/dossiers2/custom/cache.py
import nations
from items import vehicles
def getCache():
global _g_cache
return _g_cache
def buildCache():
vehiclesByLevel = {}
vehiclesByTag = {'beast': set(),'sinai': set(),'patton': set()}
vehiclesInTreeByNation = {}
vehiclesInTree = set()
nationsWithVehiclesInTree = []
unlocksSources = vehicles.getUnlocksSources()
for nationIdx in xrange(len(nations.NAMES)):
nationList = vehicles.g_list.getList(nationIdx)
vehiclesInNationTree = set()
for vehDescr in nationList.itervalues():
vehiclesByLevel.setdefault(vehDescr.level, set()).add(vehDescr.compactDescr)
for tag in ('beast', 'sinai', 'patton'):
if tag in vehDescr.tags:
vehiclesByTag[tag].add(vehDescr.compactDescr)
if len(unlocksSources.get(vehDescr.compactDescr, set())) > 0 or len(vehicles.g_cache.vehicle(nationIdx, vehDescr.id).unlocksDescrs) > 0:
vehiclesInNationTree.add(vehDescr.compactDescr)
vehiclesInTree.update(vehiclesInNationTree)
vehiclesInTreeByNation[nationIdx] = vehiclesInNationTree
if bool(vehiclesInNationTree):
nationsWithVehiclesInTree.append(nationIdx)
vehicles8p = vehiclesByLevel[8] | vehiclesByLevel[9] | vehiclesByLevel[10]
_g_cache.update({'vehiclesByLevel': vehiclesByLevel,
'vehicles8+': vehicles8p,
'vehiclesByTag': vehiclesByTag,
'mausTypeCompDescr': vehicles.makeVehicleTypeCompDescrByName('germany:G42_Maus'),
'vehiclesInTreesByNation': vehiclesInTreeByNation,
'vehiclesInTrees': vehiclesInTree,
'nationsWithVehiclesInTree': nationsWithVehiclesInTree
})
_g_cache = {} | en | 0.588811 | # uncompyle6 version 2.11.3 # Python bytecode 2.7 (62211) # Decompiled from: Python 2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)] # Embedded file name: scripts/common/dossiers2/custom/cache.py | 2.289787 | 2 |
src/parser.py | harkiratbehl/PyGM | 2 | 477 | <gh_stars>1-10
#!/usr/bin/python
from code import TreeNode
from code import ThreeAddressCode
from lexer import tokens
from random import *
from symbol_table import SymbolTable
from symbol_table import SymbolTableNode
import logging
import ply.lex as lex
import ply.yacc as yacc
import sys
from codegen import convert_tac
from code import Code
from codegen import generate_assembly
three_addr_code = ThreeAddressCode()
assembly_code = Code()
parsed = []
symbol_table = SymbolTable()
var_list = []
generated = {'temp': [], 'scope': ['scope_0'], 'label': [], 'str_list': []}
def gen(s):
if s not in generated.keys():
generated[s] = []
temp = s + '_' + str(len(generated[s]))
generated[s] += [temp]
return temp
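# Illustrative behaviour (sketch): successive calls return numbered names,
# e.g. gen('temp') -> 'temp_0', gen('temp') -> 'temp_1', gen('label') -> 'label_0'.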
def print_error(err):
print "*** Error: " + err + "! ***"
sys.exit(1)
def check_variable(TreeNode):
# return 2 values. first is the name for the variable, second is 0 if variable not found
# TreeNode.print_node()
# symbol_table.print_symbol_table()
if TreeNode.isLvalue == 1:
if TreeNode.data not in generated['temp']:
name = symbol_table.search_identifier(TreeNode.data)
if name == False:
name = symbol_table.search_function(TreeNode.data)
if name == False:
print_error("Variable " + TreeNode.data + " is undefined")
return TreeNode.data
else:
return name
else:
newNode = SymbolTableNode(name, TreeNode.input_type)
symbol_table.add_var(newNode)
if TreeNode.children == []:
return name
else:
return name + '[' + TreeNode.children + ']'
else:
newNode = SymbolTableNode(TreeNode.data, TreeNode.input_type)
symbol_table.add_var(newNode)
return TreeNode.data
else:
if TreeNode.input_type != 'STRING':
return TreeNode.data
else:
TreeNode.print_node()
return TreeNode.data
precedence = (
('left','IDENTIFIER'),
('right','ASSIGN_OP'),
('left','COMMA'),
('left','LSQUARE'),
('left','RSQUARE'),
('left','LCURLY'),
('left','RCURLY'),
('left','DDD'),
('left','DOT'),
('left','SEMICOLON'),
('left','COLON'),
('left','SINGLE_QUOTES'),
('left','DOUBLE_QUOTES'),
('left','DECIMAL_LIT'),
('left','OCTAL_LIT'),
('left','HEX_LIT'),
('left','FLOAT_LIT'),
('left','STRING_LIT'),
('left','NEWLINE'),
('left','BREAK'),
('left','CONTINUE'),
('left','RETURN'),
('left','RROUND'),
('left','LROUND'),
('left', 'OR_OR'),
('left', 'AMP_AMP'),
('left', 'EQ_EQ', 'NOT_EQ','LT','LT_EQ','GT','GT_EQ'),
('left', 'PLUS', 'MINUS','OR','CARET'),
('left', 'STAR', 'DIVIDE','MODULO','AMP','AND_OR','LS','RS'),
)
def p_SourceFile(p):
'''SourceFile : PACKAGE IDENTIFIER SEMICOLON ImportDeclList TopLevelDeclList
'''
parsed.append(p.slice)
# TODO: Ignoring package name and Imports for now
p[0] = p[5]
var_list = symbol_table.make_var_list()
three_addr_code = convert_tac(p[0].TAC)
symbol_table.fill_next_use(three_addr_code)
assembly_code = generate_assembly(three_addr_code,var_list,symbol_table)
# p[0].TAC.print_code()
# three_addr_code.print_code()
assembly_code.print_code()
# symbol_table.print_symbol_table()
return
def p_ImportDeclList(p):
'''ImportDeclList : ImportDecl SEMICOLON ImportDeclList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_TopLevelDeclList(p):
'''TopLevelDeclList : TopLevelDecl SEMICOLON TopLevelDeclList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
if p[3] != None:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]], p[1].TAC)
return
def p_TopLevelDecl(p):
'''TopLevelDecl : Declaration
| FunctionDecl
'''
parsed.append(p.slice)
p[0] = p[1]
return
def p_ImportDecl(p):
'''ImportDecl : IMPORT LROUND ImportSpecList RROUND
| IMPORT ImportSpec
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpecList(p):
'''ImportSpecList : ImportSpec SEMICOLON ImportSpecList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpec(p):
'''ImportSpec : DOT string_lit
| IDENTIFIER string_lit
| empty string_lit
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_Block(p):
'''Block : LCURLY ScopeStart StatementList ScopeEnd RCURLY
'''
parsed.append(p.slice)
p[0] = p[3]
p[0].data = p[2].data
p[0].name = 'Block'
return
def p_ScopeStart(p):
'''ScopeStart : empty
'''
parsed.append(p.slice)
symbol_table.add_scope(gen('scope'))
p[0] = TreeNode('ScopeStart', symbol_table.current_scope, 'None')
return
def p_ScopeEnd(p):
'''ScopeEnd : empty
'''
parsed.append(p.slice)
symbol_table.end_scope()
return
def p_StatementList(p):
'''StatementList : Statement SEMICOLON StatementList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
p[0] = TreeNode('StatementList', 0, 'INT', 0, [p[1].data] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('StatementList', 0, 'INT')
return
def p_Statement(p):
'''Statement : Declaration
| SimpleStmt
| ReturnStmt
| Block
| IfStmt
| SwitchStmt
| ForStmt
| BreakStmt
| ContinueStmt
| GotoStmt
| PrintIntStmt
| PrintStrStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Statement'
return
def p_PrintIntStmt(p):
'''PrintIntStmt : PRINTLN LROUND IDENTIFIER RROUND
| PRINTLN LROUND int_lit RROUND
'''
if hasattr(p[3], 'name') and p[3].name == 'int_lit':
p[0] = p[3]
# p[0].isLvalue = 0
else:
p[0] = TreeNode('IDENTIFIER', p[3], 'INT', 1, [])
p[0].TAC.add_line(['print_int', check_variable(p[0]), '', ''])
p[0].name = 'PrintIntStmt'
return
def p_PrintStrStmt(p):
'''PrintStrStmt : PRINTLN LROUND string_lit RROUND
'''
p[0] = p[3]
name = symbol_table.current_scope + '_' + gen('str_list')
parametersNode = SymbolTableNode(p[3].data, p[3].input_type)
newNode = SymbolTableNode(name, p[3].input_type, parameters = [parametersNode])
symbol_table.add_var(newNode)
p[0].TAC.add_line(['print_str', name, '', ''])
p[0].name = 'PrintStrStmt'
return
def p_Declaration(p):
'''Declaration : ConstDecl
| TypeDecl
| VarDecl
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Declaration'
return
def p_ConstDecl(p):
'''ConstDecl : CONST LROUND ConstSpecList RROUND
| CONST ConstSpec
'''
parsed.append(p.slice)
return
def p_ConstSpecList(p):
'''ConstSpecList : empty
| ConstSpecList ConstSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_ConstSpec(p):
'''ConstSpec : IDENTIFIER
| IdentifierList
| IDENTIFIER EQ Expression
| IdentifierList EQ ExpressionList
| IDENTIFIER Type EQ Expression
| IdentifierList Type EQ ExpressionList
'''
parsed.append(p.slice)
return
def p_IdentifierList(p):
'''IdentifierList : IDENTIFIER COMMA IdentifierBotList
'''
parsed.append(p.slice)
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_IdentifierBotList(p):
'''IdentifierBotList : IDENTIFIER COMMA IdentifierBotList
| IDENTIFIER
'''
parsed.append(p.slice)
if len(p) == 2:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node])
elif len(p) == 4:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_ExpressionList(p):
'''ExpressionList : Expression COMMA ExpressionBotList
'''
parsed.append(p.slice)
p[0] = TreeNode('ExpressionList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ExpressionBotList(p):
'''ExpressionBotList : Expression COMMA ExpressionBotList
| Expression
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]], p[1].TAC)
elif len(p) == 4:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_TypeDecl(p):
'''TypeDecl : TYPE TypeSpecTopList
'''
parsed.append(p.slice)
return
def p_TypeSpecTopList(p):
'''TypeSpecTopList : TypeSpec
| LROUND TypeSpecList RROUND
'''
parsed.append(p.slice)
return
def p_TypeSpecList(p):
'''TypeSpecList : empty
| TypeSpecList TypeSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_TypeSpec(p):
'''TypeSpec : AliasDecl
| TypeDef
'''
parsed.append(p.slice)
return
def p_AliasDecl(p):
'''AliasDecl : IDENTIFIER EQ Type
'''
parsed.append(p.slice)
return
def p_TypeDef(p):
'''TypeDef : IDENTIFIER Type
'''
parsed.append(p.slice)
return
def p_Type(p):
'''Type : TypeLit
| StandardTypes
| LROUND Type RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Type'
return
def p_StandardTypes(p):
'''StandardTypes : PREDEFINED_TYPES
'''
parsed.append(p.slice)
p[0] = TreeNode('StandardTypes', p[1], 'NONE')
return
def p_TypeLit(p):
'''TypeLit : ArrayType
| StructType
| FunctionType
| PointerType
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'TypeLit'
return
def p_PointerType(p):
'''PointerType : STAR Type
'''
parsed.append(p.slice)
return
def p_ArrayType(p):
'''ArrayType : LSQUARE ArrayLength RSQUARE Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ArrayType', p[2].data, p[4].data)
return
def p_ArrayLength(p):
'''ArrayLength : Expression
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'ArrayLength'
return
def p_StructType(p):
'''StructType : STRUCT LCURLY FieldDeclList RCURLY
'''
parsed.append(p.slice)
return
def p_FieldDeclList(p):
'''FieldDeclList : empty
| FieldDeclList FieldDecl SEMICOLON
'''
parsed.append(p.slice)
return
def p_FieldDecl(p):
'''FieldDecl : IdentifierList Type TagTop
| IDENTIFIER Type TagTop
'''
parsed.append(p.slice)
return
def p_TagTop(p):
'''TagTop : empty
| Tag
'''
parsed.append(p.slice)
return
def p_Tag(p):
'''Tag : string_lit
'''
parsed.append(p.slice)
return
def p_FunctionType(p):
'''FunctionType : FUNC Signature
'''
parsed.append(p.slice)
return
def p_Signature(p):
'''Signature : Parameters
| Parameters Result
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Signature'
s = 'scope_' + str(len(generated['scope']))
symbol_table.new_scope(s)
for child in p[1].children:
symbol_table.add_identifier(child, s)
newNode = SymbolTableNode(s + '_' + child.data, child.input_type)
symbol_table.add_var(newNode, s)
# symbol_table.print_symbol_table()
if len(p) == 2:
p[0].input_type = TreeNode('Result', 0, 'None')
else:
p[0].input_type = p[2]
return
def p_Result(p):
'''Result : Parameters
| Type
'''
parsed.append(p.slice)
if p[1].name == 'Type':
p[0] = TreeNode('Result', 1, 'None', 0, [p[1]])
else:
p[0] = p[1]
p[0].name = 'Result'
return
def p_Parameters(p):
'''Parameters : LROUND RROUND
| LROUND ParameterList RROUND
'''
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Parameters', 0, 'None')
else:
p[0] = p[2]
p[0].name = 'Parameters'
return
def p_ParameterList(p):
'''ParameterList : ParameterDecl
| ParameterList COMMA ParameterDecl
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
p[0].name = 'ParameterList'
elif len(p) == 4:
p[0] = TreeNode('ParameterList', p[1].data + p[3].data, 'None', 0, p[1].children + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ParameterDecl(p):
'''ParameterDecl : IdentifierList Type
| IDENTIFIER Type
| Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ParameterDecl', 0, 'None')
if len(p) == 3:
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
for node in p[1].children:
p[0].data += 1
node.input_type = p[2].data
p[0].children += [node]
else:
node = TreeNode('IDENTIFIER', p[1], p[2].data, 1)
p[0].data += 1
p[0].children += [node]
else:
p[0].data += 1
p[0].children += [p[1]]
return
def p_VarDecl(p):
'''VarDecl : VAR VarSpecTopList
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'VarDecl'
return
def p_VarSpecTopList(p):
'''VarSpecTopList : VarSpec
| LROUND VarSpecList RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'VarSpecTopList'
return
def p_VarSpecList(p):
'''VarSpecList : empty
| VarSpecList VarSpec SEMICOLON
'''
return
def p_VarSpec(p):
'''VarSpec : IDENTIFIER Type
| IDENTIFIER EQ Expression
| IDENTIFIER Type EQ Expression
| IdentifierList Type
| IdentifierList EQ ExpressionList
| IdentifierList Type EQ ExpressionList
'''
# Insert into symbol table
p[0] = TreeNode('VarSpec', 0, 'NONE')
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
zero_val = TreeNode('decimal_lit', 0, 'INT')
# l1 = len(p[1].children)
# if len(p) == 3:
# expr_list = TreeNode('Expr_List', 0, 'NONE', 0, [zero_val] * l1)
# elif len(p) == 4:
# expr_list = p[3]
# elif len(p) == 5:
# expr_list = p[4]
# l2 = len(expr_list.children)
# p[0].TAC.append_TAC(expr_list.TAC)
# p[0].TAC.append_TAC(p[1].TAC)
# if l1 == l2:
# for i in range(l1):
# p[0].TAC.add_line(['=', p[1].children[i], expr_list.children[i].data, ''])
# else:
# print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
else:
p[1] = TreeNode('IDENTIFIER',p[1],'INT',1)
if p[2].input_type != 'NONE':
# array case
# p[2].print_node()
if symbol_table.add_identifier(p[1], size = p[2].data) == False:
print_error("Unable to add to SymbolTable")
return
name = symbol_table.search_identifier(p[1].data)
newNode = SymbolTableNode(name, p[1].input_type,size = p[2].data)
symbol_table.add_var(newNode)
p[0] = TreeNode('VarSpec',p[1].data,'INT')
# expr = TreeNode('Expr', 0, 'NONE')
# if len(p) == 4:
# expr = p[3]
# p[0].TAC.append_TAC(p[3].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
# elif len(p) == 5:
# expr = p[4]
# p[0].TAC.append_TAC(p[4].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
return
def p_FunctionDecl(p):
'''FunctionDecl : FUNC FunctionName Signature
| FUNC FunctionName Signature FunctionBody
'''
parsed.append(p.slice)
# symbol_table.print_symbol_table()
p[0] = TreeNode('FunctionDecl', 0, 'INT')
# print symbol_table.current_scope
# p[4].TAC.print_code()
symbol_table.add_function(p[2].data, p[3].input_type, p[3].children)
if len(p) == 5:
noOfParams = 0
for f in symbol_table.symbol_table[symbol_table.current_scope]['functions']:
if f.name == p[2].data:
noOfParams = len(f.parameters)
p[0].TAC.add_line(['func', check_variable(p[2]), str(noOfParams), ''])
for child in reversed(p[3].children):
p[0].TAC.add_line(['getparam', p[4].data + '_' + child.data, '', ''])
p[0].TAC.add_line(['stack_push', '', '', ''])
p[0].TAC.append_TAC(p[4].TAC)
return
def p_FunctionName(p):
'''FunctionName : IDENTIFIER
'''
parsed.append(p.slice)
p[0] = TreeNode('FunctionName', p[1], 'INT', 1)
return
def p_FunctionBody(p):
'''FunctionBody : Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'FunctionBody'
return
def p_SimpleStmt(p):
'''SimpleStmt : Expression
| Assignment
| ShortVarDecl
| IncDecStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'SimpleStmt'
return
def p_IncDecStmt(p):
'''IncDecStmt : Expression PLUS_PLUS
| Expression MINUS_MINUS
'''
parsed.append(p.slice)
one_val = TreeNode('IncDecStmt', '1', 'INT')
p[0] = p[1]
if p[1].isLvalue == 1:
if p[2] == '++':
p[0].TAC.add_line(['+', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
p[0].TAC.add_line(['-', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
print_error("Lvalue required")
p[0].name = 'IncDecStmt'
return
def p_ShortVarDecl(p):
'''ShortVarDecl : ExpressionList ASSIGN_OP ExpressionList
| Expression ASSIGN_OP Expression
'''
parsed.append(p.slice)
# TODO: Add in symbol table
p[0] = TreeNode('ShortVarDecl', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1].children[i]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.add_line([p[2], check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2], check_variable(p[1]), check_variable(p[3]), ''])
return
def p_Assignment(p):
'''Assignment : ExpressionList assign_op ExpressionList
| Expression assign_op Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('Assignment', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].children[i].data) == False and p[1].children[i].data not in generated['temp']:
print_error("Variable " + p[1].children[i].data + " is undefined")
return
if p[3].children[i].isLvalue == 1 and symbol_table.search_identifier(p[3].children[i].data) == False and p[3].children[i].data not in generated['temp']:
print_error("Variable " + p[3].children[i].data + " is undefined")
return
p[0].TAC.add_line([p[2].data, check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].data) == False and p[1].data not in generated['temp']:
print_error("Variable " + p[1].data + " is undefined")
return
if p[3].isLvalue == 1 and symbol_table.search_identifier(p[3].data) == False and p[3].data not in generated['temp']:
print_error("Variable " + p[3].data + " is undefined")
return
# print symbol_table.current_scope
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2].data, check_variable(p[1]), check_variable(p[3]), ''])
return
def p_assign_op(p):
'''assign_op : EQ
| PLUS_EQ
| MINUS_EQ
| OR_EQ
| CARET_EQ
| STAR_EQ
| DIVIDE_EQ
| MODULO_EQ
| LS_EQ
| RS_EQ
| AMP_EQ
| AND_OR_EQ
'''
parsed.append(p.slice)
p[0] = TreeNode('assign_op', p[1], 'OPERATOR')
return
def p_IfStmt(p):
'''IfStmt : IF Expression Block
| IF Expression Block ELSE elseTail
'''
parsed.append(p.slice)
if len(p) == 4:
l1 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
if len(p) == 6:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[5].TAC)
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_elseTail(p):
'''elseTail : IfStmt
| Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'elseTail'
return
def p_SwitchStmt(p):
'''SwitchStmt : ExprSwitchStmt
'''
parsed.append(p.slice)
p[0] = TreeNode('SwitchStmt', 0, 'INT', 0, [], p[1].TAC)
return
def p_ExprSwitchStmt(p):
'''ExprSwitchStmt : SWITCH SimpleStmt SEMICOLON LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH SimpleStmt SEMICOLON Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
'''
parsed.append(p.slice)
if len(p) == 8:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('ExprSwitchStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
t1 = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line(['=', check_variable(t1) , check_variable(p[2]), ''])
p[0].TAC.append_TAC(p[5].data)
for i in range(len(p[5].children)):
p[0].TAC.add_line(['ifgotoeq', check_variable(t1), p[5].children[i][0], p[5].children[i][1]])
p[0].TAC.add_line(['goto', l2, '', ''])
for i in range(p[5].TAC.length()):
if i in p[5].TAC.leaders[1:]:
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(p[5].TAC.code[i])
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_ExprCaseClauseList(p):
'''ExprCaseClauseList : empty
| ExprCaseClauseList ExprCaseClause
'''
parsed.append(p.slice)
TAC1 = ThreeAddressCode()
TAC2 = ThreeAddressCode()
if len(p) == 3:
TAC1 = p[1].data
TAC2 = p[2].data
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT', 0, p[1].children + p[2].children, p[1].TAC)
p[0].TAC.add_leader(p[0].TAC.length())
p[0].TAC.append_TAC(p[2].TAC)
p[0].data.append_TAC(TAC2)
else:
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT')
return
def p_ExprCaseClause(p):
'''ExprCaseClause : ExprSwitchCase COLON StatementList
'''
parsed.append(p.slice)
l1 = gen('label')
p[0] = TreeNode('ExprCaseClause', 0, 'INT')
# p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
# p[0].TAC.add_line(['ifgotoneq', p[1].children, p[1].children, l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].children = [[p[1].data,l1]]
p[0].data = p[1].TAC
return
def p_ExprSwitchCase(p):
'''ExprSwitchCase : CASE ExpressionList
| DEFAULT
| CASE Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('ExprSwitchCase', 0, 'INT')
if len(p) == 3:
p[0].data = p[2].data
p[0].TAC = p[2].TAC
return
def p_ForStmt(p):
'''ForStmt : FOR Expression Block
| FOR Block
'''
parsed.append(p.slice)
p[0] = TreeNode('ForStmt', 0, 'INT')
if len(p) == 4:
l1 = gen('label')
l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq',check_variable(p[2]), '0', l2])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
p[0].TAC.add_line(['label', l2, '', ''])
if len(p) == 3:
l1 = gen('label')
# l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
# p[0].TAC.add_line([l2])
return
def p_ReturnStmt(p):
'''ReturnStmt : RETURN
| RETURN Expression
| RETURN ExpressionList
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ReturnStmt', 0, 'None')
p[0].TAC.add_line(['return', '', '', ''])
if len(p) == 3:
if p[2].name == 'Expression':
p[0] = p[2]
p[0].name = 'ReturnStmt'
p[0].TAC.add_line(['return', check_variable(p[2]), '', ''])
return
def p_BreakStmt(p):
'''BreakStmt : BREAK IDENTIFIER
'''
parsed.append(p.slice)
return
def p_ContinueStmt(p):
'''ContinueStmt : CONTINUE IDENTIFIER
'''
parsed.append(p.slice)
return
def p_GotoStmt(p):
'''GotoStmt : GOTO IDENTIFIER
'''
parsed.append(p.slice)
return
def p_Expression(p):
'''Expression : UnaryExpr
| Expression OR_OR Expression
| Expression AMP_AMP Expression
| Expression EQ_EQ Expression
| Expression NOT_EQ Expression
| Expression LT Expression
| Expression LT_EQ Expression
| Expression GT Expression
| Expression GT_EQ Expression
| Expression PLUS Expression
| Expression MINUS Expression
| Expression OR Expression
| Expression CARET Expression
| Expression STAR Expression
| Expression DIVIDE Expression
| Expression MODULO Expression
| Expression LS Expression
| Expression RS Expression
| Expression AMP Expression
| Expression AND_OR Expression
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1, [], p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line([p[2],check_variable(p[0]), check_variable(p[1]), check_variable(p[3])])
p[0].name = 'Expression'
return
def p_UnaryExpr(p):
'''UnaryExpr : PrimaryExpr
| unary_op UnaryExpr
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line([check_variable(p[1]), check_variable(p[0]), check_variable(p[2]), ''])
p[0].name = 'UnaryExpr'
return
def p_unary_op(p):
'''unary_op : PLUS
| MINUS
| NOT
| CARET
| STAR
| AMP
| LT_MINUS
'''
parsed.append(p.slice)
p[0] = TreeNode('unary_op', p[1], 'OPERATOR')
return
def p_PrimaryExpr(p):
'''PrimaryExpr : Operand
| IDENTIFIER
| PrimaryExpr Selector
| PrimaryExpr Index
| PrimaryExpr Arguments
'''
parsed.append(p.slice)
if len(p) == 2:
if p.slice[1].type == 'IDENTIFIER':
p[0] = TreeNode('IDENTIFIER', p[1], 'INT', 1)
elif p[1].name == 'Operand':
p[0] = p[1]
elif len(p) == 3:
if p[2].name == 'Index':
p[0] = TreeNode('IDENTIFIER', p[1].data, 'INT', 1, p[2].data)
elif p[2].name == 'Arguments':
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.append_TAC(p[2].TAC)
# p[1].print_node()
func = check_variable(p[1]).split("_")
scope, funcName = "_".join(func[:2]), "_".join(func[2:])
temp = 0
for f in symbol_table.symbol_table[scope]['functions']:
if f.name == funcName:
temp = len(f.parameters)
# p[2].print_node()
for child in p[2].children:
p[0].TAC.add_line(['putparam', check_variable(child), '', ''])
if temp != p[2].data:
print_error('Function ' + funcName + ' requires ' + str(temp) + ' parameters but ' + str(p[2].data) + ' supplied')
p[0].TAC.add_line(['call', check_variable(p[1]), str(p[2].data), ''])
p[0].TAC.add_line(['return_value', check_variable(p[0]), '', ''])
p[0].name = 'PrimaryExpr'
return
def p_Operand(p):
'''Operand : Literal
| LROUND Expression RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Operand'
return
def p_Literal(p):
'''Literal : BasicLit
| FunctionLit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Literal'
return
def p_BasicLit(p):
'''BasicLit : int_lit
| float_lit
| string_lit
| rune_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'BasicLit'
return
def p_int_lit(p):
'''int_lit : decimal_lit
| octal_lit
| hex_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'int_lit'
return
def p_decimal_lit(p):
'''decimal_lit : DECIMAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('decimal_lit', p[1], 'INT')
return
def p_octal_lit(p):
'''octal_lit : OCTAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('octal_lit', p[1], 'OCT')
return
def p_hex_lit(p):
'''hex_lit : HEX_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('hex_lit', p[1], 'HEX')
return
def p_float_lit(p):
'''float_lit : FLOAT_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('float_lit', p[1], 'FLOAT')
return
def p_FunctionLit(p):
'''FunctionLit : FUNC Signature FunctionBody
'''
parsed.append(p.slice)
# Anonymous Function
# Not implemented yet
return
def p_Selector(p):
'''Selector : DOT IDENTIFIER
'''
parsed.append(p.slice)
return
def p_Index(p):
'''Index : LSQUARE Expression RSQUARE
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'Index'
return
def p_Arguments(p):
'''Arguments : LROUND RROUND
| LROUND ExpressionList RROUND
| LROUND Expression RROUND
| LROUND Type RROUND
| LROUND Type COMMA ExpressionList RROUND
| LROUND Type COMMA Expression RROUND
'''
# print p.slice
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Arguments', 0, 'None')
if len(p) == 4:
if p[2].name == 'Expression':
p[0] = TreeNode('Arguments', 1, 'None', 0, [p[2]], p[2].TAC)
if p[2].name == 'ExpressionList':
p[0] = p[2]
p[0].name = 'Arguments'
p[0].data = len(p[2].children)
return
def p_string_lit(p):
'''string_lit : STRING_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('string_lit', p[1], 'STRING')
return
def p_rune_lit(p):
'''rune_lit : RUNE_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('rune_lit', p[1], 'RUNE')
return
def p_empty(p):
'empty :'
pass
def p_error(p):
print p
if p == None:
print str(sys.argv[1]) + " :: You missed something at the end"
else:
print str(sys.argv[1]) + " :: Syntax error in line no " + str(p.lineno)
# Standard Logger
logging.basicConfig(
level = logging.DEBUG,
filename = "parselog.txt",
filemode = "w",
format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
yacc.yacc(debug=True, debuglog=log)
input_file = sys.argv[1]
import os
if not os.path.isfile(input_file):
print('Input file ' + input_file + ' does not exist')
sys.exit(1)
input_code = open(input_file, 'r').read()
if input_code[-1] != '\n':
input_code += '\n'
yacc.parse(input_code, debug=log, tracking=True)
| #!/usr/bin/python
from code import TreeNode
from code import ThreeAddressCode
from lexer import tokens
from random import *
from symbol_table import SymbolTable
from symbol_table import SymbolTableNode
import logging
import ply.lex as lex
import ply.yacc as yacc
import sys
from codegen import convert_tac
from code import Code
from codegen import generate_assembly
three_addr_code = ThreeAddressCode()
assembly_code = Code()
parsed = []
symbol_table = SymbolTable()
var_list = []
generated = {'temp': [], 'scope': ['scope_0'], 'label': [], 'str_list': []}
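# gen(s) hands out unique names of the form '<s>_<n>' (temp_0, label_3, scope_1, ...),
# using the `generated` dict to count how many names of each kind exist so far.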
def gen(s):
if s not in generated.keys():
generated[s] = []
temp = s + '_' + str(len(generated[s]))
generated[s] += [temp]
return temp
def print_error(err):
print "*** Error: " + err + "! ***"
sys.exit(1)
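# check_variable maps an AST node to the name used in the three-address code:
# identifiers are resolved through the symbol table (falling back to function names)
# and registered as variables, undefined names abort via print_error, and compiler
# temporaries or literals pass through unchanged, with array accesses rendered as name[index].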
def check_variable(TreeNode):
# return 2 values. first is the name for the variable, second is 0 if variable not found
# TreeNode.print_node()
# symbol_table.print_symbol_table()
if TreeNode.isLvalue == 1:
if TreeNode.data not in generated['temp']:
name = symbol_table.search_identifier(TreeNode.data)
if name == False:
name = symbol_table.search_function(TreeNode.data)
if name == False:
print_error("Variable " + TreeNode.data + " is undefined")
return TreeNode.data
else:
return name
else:
newNode = SymbolTableNode(name, TreeNode.input_type)
symbol_table.add_var(newNode)
if TreeNode.children == []:
return name
else:
return name + '[' + TreeNode.children + ']'
else:
newNode = SymbolTableNode(TreeNode.data, TreeNode.input_type)
symbol_table.add_var(newNode)
return TreeNode.data
else:
if TreeNode.input_type != 'STRING':
return TreeNode.data
else:
TreeNode.print_node()
return TreeNode.data
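# PLY operator precedence/associativity table, ordered from lowest to highest precedence.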
precedence = (
('left','IDENTIFIER'),
('right','ASSIGN_OP'),
('left','COMMA'),
('left','LSQUARE'),
('left','RSQUARE'),
('left','LCURLY'),
('left','RCURLY'),
('left','DDD'),
('left','DOT'),
('left','SEMICOLON'),
('left','COLON'),
('left','SINGLE_QUOTES'),
('left','DOUBLE_QUOTES'),
('left','DECIMAL_LIT'),
('left','OCTAL_LIT'),
('left','HEX_LIT'),
('left','FLOAT_LIT'),
('left','STRING_LIT'),
('left','NEWLINE'),
('left','BREAK'),
('left','CONTINUE'),
('left','RETURN'),
('left','RROUND'),
('left','LROUND'),
('left', 'OR_OR'),
('left', 'AMP_AMP'),
('left', 'EQ_EQ', 'NOT_EQ','LT','LT_EQ','GT','GT_EQ'),
('left', 'PLUS', 'MINUS','OR','CARET'),
('left', 'STAR', 'DIVIDE','MODULO','AMP','AND_OR','LS','RS'),
)
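# Grammar rules. Each p_* function's docstring is the BNF production handed to
# ply.yacc; the body is the semantic action, building TreeNode ASTs and emitting
# three-address code (TAC) along the way.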
def p_SourceFile(p):
'''SourceFile : PACKAGE IDENTIFIER SEMICOLON ImportDeclList TopLevelDeclList
'''
parsed.append(p.slice)
# TODO: Ignoring package name and Imports for now
p[0] = p[5]
var_list = symbol_table.make_var_list()
three_addr_code = convert_tac(p[0].TAC)
symbol_table.fill_next_use(three_addr_code)
assembly_code = generate_assembly(three_addr_code,var_list,symbol_table)
# p[0].TAC.print_code()
# three_addr_code.print_code()
assembly_code.print_code()
# symbol_table.print_symbol_table()
return
def p_ImportDeclList(p):
'''ImportDeclList : ImportDecl SEMICOLON ImportDeclList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_TopLevelDeclList(p):
'''TopLevelDeclList : TopLevelDecl SEMICOLON TopLevelDeclList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
if p[3] != None:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]], p[1].TAC)
return
def p_TopLevelDecl(p):
'''TopLevelDecl : Declaration
| FunctionDecl
'''
parsed.append(p.slice)
p[0] = p[1]
return
def p_ImportDecl(p):
'''ImportDecl : IMPORT LROUND ImportSpecList RROUND
| IMPORT ImportSpec
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpecList(p):
'''ImportSpecList : ImportSpec SEMICOLON ImportSpecList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpec(p):
'''ImportSpec : DOT string_lit
| IDENTIFIER string_lit
| empty string_lit
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_Block(p):
'''Block : LCURLY ScopeStart StatementList ScopeEnd RCURLY
'''
parsed.append(p.slice)
p[0] = p[3]
p[0].data = p[2].data
p[0].name = 'Block'
return
def p_ScopeStart(p):
'''ScopeStart : empty
'''
parsed.append(p.slice)
symbol_table.add_scope(gen('scope'))
p[0] = TreeNode('ScopeStart', symbol_table.current_scope, 'None')
return
def p_ScopeEnd(p):
'''ScopeEnd : empty
'''
parsed.append(p.slice)
symbol_table.end_scope()
return
def p_StatementList(p):
'''StatementList : Statement SEMICOLON StatementList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
p[0] = TreeNode('StatementList', 0, 'INT', 0, [p[1].data] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('StatementList', 0, 'INT')
return
def p_Statement(p):
'''Statement : Declaration
| SimpleStmt
| ReturnStmt
| Block
| IfStmt
| SwitchStmt
| ForStmt
| BreakStmt
| ContinueStmt
| GotoStmt
| PrintIntStmt
| PrintStrStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Statement'
return
def p_PrintIntStmt(p):
'''PrintIntStmt : PRINTLN LROUND IDENTIFIER RROUND
| PRINTLN LROUND int_lit RROUND
'''
if hasattr(p[3], 'name') and p[3].name == 'int_lit':
p[0] = p[3]
# p[0].isLvalue = 0
else:
p[0] = TreeNode('IDENTIFIER', p[3], 'INT', 1, [])
p[0].TAC.add_line(['print_int', check_variable(p[0]), '', ''])
p[0].name = 'PrintIntStmt'
return
def p_PrintStrStmt(p):
'''PrintStrStmt : PRINTLN LROUND string_lit RROUND
'''
p[0] = p[3]
name = symbol_table.current_scope + '_' + gen('str_list')
parametersNode = SymbolTableNode(p[3].data, p[3].input_type)
newNode = SymbolTableNode(name, p[3].input_type, parameters = [parametersNode])
symbol_table.add_var(newNode)
p[0].TAC.add_line(['print_str', name, '', ''])
p[0].name = 'PrintStrStmt'
return
def p_Declaration(p):
'''Declaration : ConstDecl
| TypeDecl
| VarDecl
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Declaration'
return
def p_ConstDecl(p):
'''ConstDecl : CONST LROUND ConstSpecList RROUND
| CONST ConstSpec
'''
parsed.append(p.slice)
return
def p_ConstSpecList(p):
'''ConstSpecList : empty
| ConstSpecList ConstSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_ConstSpec(p):
'''ConstSpec : IDENTIFIER
| IdentifierList
| IDENTIFIER EQ Expression
| IdentifierList EQ ExpressionList
| IDENTIFIER Type EQ Expression
| IdentifierList Type EQ ExpressionList
'''
parsed.append(p.slice)
return
def p_IdentifierList(p):
'''IdentifierList : IDENTIFIER COMMA IdentifierBotList
'''
parsed.append(p.slice)
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_IdentifierBotList(p):
'''IdentifierBotList : IDENTIFIER COMMA IdentifierBotList
| IDENTIFIER
'''
parsed.append(p.slice)
if len(p) == 2:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node])
elif len(p) == 4:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_ExpressionList(p):
'''ExpressionList : Expression COMMA ExpressionBotList
'''
parsed.append(p.slice)
p[0] = TreeNode('ExpressionList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ExpressionBotList(p):
'''ExpressionBotList : Expression COMMA ExpressionBotList
| Expression
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]], p[1].TAC)
elif len(p) == 4:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_TypeDecl(p):
'''TypeDecl : TYPE TypeSpecTopList
'''
parsed.append(p.slice)
return
def p_TypeSpecTopList(p):
'''TypeSpecTopList : TypeSpec
| LROUND TypeSpecList RROUND
'''
parsed.append(p.slice)
return
def p_TypeSpecList(p):
'''TypeSpecList : empty
| TypeSpecList TypeSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_TypeSpec(p):
'''TypeSpec : AliasDecl
| TypeDef
'''
parsed.append(p.slice)
return
def p_AliasDecl(p):
'''AliasDecl : IDENTIFIER EQ Type
'''
parsed.append(p.slice)
return
def p_TypeDef(p):
'''TypeDef : IDENTIFIER Type
'''
parsed.append(p.slice)
return
def p_Type(p):
'''Type : TypeLit
| StandardTypes
| LROUND Type RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Type'
return
def p_StandardTypes(p):
'''StandardTypes : PREDEFINED_TYPES
'''
parsed.append(p.slice)
p[0] = TreeNode('StandardTypes', p[1], 'NONE')
return
def p_TypeLit(p):
'''TypeLit : ArrayType
| StructType
| FunctionType
| PointerType
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'TypeLit'
return
def p_PointerType(p):
'''PointerType : STAR Type
'''
parsed.append(p.slice)
return
def p_ArrayType(p):
'''ArrayType : LSQUARE ArrayLength RSQUARE Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ArrayType', p[2].data, p[4].data)
return
def p_ArrayLength(p):
'''ArrayLength : Expression
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'ArrayLength'
return
def p_StructType(p):
'''StructType : STRUCT LCURLY FieldDeclList RCURLY
'''
parsed.append(p.slice)
return
def p_FieldDeclList(p):
'''FieldDeclList : empty
| FieldDeclList FieldDecl SEMICOLON
'''
parsed.append(p.slice)
return
def p_FieldDecl(p):
'''FieldDecl : IdentifierList Type TagTop
| IDENTIFIER Type TagTop
'''
parsed.append(p.slice)
return
def p_TagTop(p):
'''TagTop : empty
| Tag
'''
parsed.append(p.slice)
return
def p_Tag(p):
'''Tag : string_lit
'''
parsed.append(p.slice)
return
def p_FunctionType(p):
'''FunctionType : FUNC Signature
'''
parsed.append(p.slice)
return
def p_Signature(p):
'''Signature : Parameters
| Parameters Result
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Signature'
s = 'scope_' + str(len(generated['scope']))
symbol_table.new_scope(s)
for child in p[1].children:
symbol_table.add_identifier(child, s)
newNode = SymbolTableNode(s + '_' + child.data, child.input_type)
symbol_table.add_var(newNode, s)
# symbol_table.print_symbol_table()
if len(p) == 2:
p[0].input_type = TreeNode('Result', 0, 'None')
else:
p[0].input_type = p[2]
return
def p_Result(p):
'''Result : Parameters
| Type
'''
parsed.append(p.slice)
if p[1].name == 'Type':
p[0] = TreeNode('Result', 1, 'None', 0, [p[1]])
else:
p[0] = p[1]
p[0].name = 'Result'
return
def p_Parameters(p):
'''Parameters : LROUND RROUND
| LROUND ParameterList RROUND
'''
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Parameters', 0, 'None')
else:
p[0] = p[2]
p[0].name = 'Parameters'
return
def p_ParameterList(p):
'''ParameterList : ParameterDecl
| ParameterList COMMA ParameterDecl
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
p[0].name = 'ParameterList'
elif len(p) == 4:
p[0] = TreeNode('ParameterList', p[1].data + p[3].data, 'None', 0, p[1].children + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ParameterDecl(p):
'''ParameterDecl : IdentifierList Type
| IDENTIFIER Type
| Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ParameterDecl', 0, 'None')
if len(p) == 3:
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
for node in p[1].children:
p[0].data += 1
node.input_type = p[2].data
p[0].children += [node]
else:
node = TreeNode('IDENTIFIER', p[1], p[2].data, 1)
p[0].data += 1
p[0].children += [node]
else:
p[0].data += 1
p[0].children += [p[1]]
return
def p_VarDecl(p):
'''VarDecl : VAR VarSpecTopList
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'VarDecl'
return
def p_VarSpecTopList(p):
'''VarSpecTopList : VarSpec
| LROUND VarSpecList RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'VarSpecTopList'
return
def p_VarSpecList(p):
'''VarSpecList : empty
| VarSpecList VarSpec SEMICOLON
'''
return
def p_VarSpec(p):
'''VarSpec : IDENTIFIER Type
| IDENTIFIER EQ Expression
| IDENTIFIER Type EQ Expression
| IdentifierList Type
| IdentifierList EQ ExpressionList
| IdentifierList Type EQ ExpressionList
'''
# Insert into symbol table
p[0] = TreeNode('VarSpec', 0, 'NONE')
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
zero_val = TreeNode('decimal_lit', 0, 'INT')
# l1 = len(p[1].children)
# if len(p) == 3:
# expr_list = TreeNode('Expr_List', 0, 'NONE', 0, [zero_val] * l1)
# elif len(p) == 4:
# expr_list = p[3]
# elif len(p) == 5:
# expr_list = p[4]
# l2 = len(expr_list.children)
# p[0].TAC.append_TAC(expr_list.TAC)
# p[0].TAC.append_TAC(p[1].TAC)
# if l1 == l2:
# for i in range(l1):
# p[0].TAC.add_line(['=', p[1].children[i], expr_list.children[i].data, ''])
# else:
# print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
else:
p[1] = TreeNode('IDENTIFIER',p[1],'INT',1)
if p[2].input_type != 'NONE':
# array case
# p[2].print_node()
if symbol_table.add_identifier(p[1], size = p[2].data) == False:
print_error("Unable to add to SymbolTable")
return
name = symbol_table.search_identifier(p[1].data)
newNode = SymbolTableNode(name, p[1].input_type,size = p[2].data)
symbol_table.add_var(newNode)
p[0] = TreeNode('VarSpec',p[1].data,'INT')
# expr = TreeNode('Expr', 0, 'NONE')
# if len(p) == 4:
# expr = p[3]
# p[0].TAC.append_TAC(p[3].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
# elif len(p) == 5:
# expr = p[4]
# p[0].TAC.append_TAC(p[4].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
return
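# Function declarations register the function in the symbol table and, when a body
# is present, emit 'func' / 'getparam' / 'stack_push' TAC lines followed by the
# body's code.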
def p_FunctionDecl(p):
'''FunctionDecl : FUNC FunctionName Signature
| FUNC FunctionName Signature FunctionBody
'''
parsed.append(p.slice)
# symbol_table.print_symbol_table()
p[0] = TreeNode('FunctionDecl', 0, 'INT')
# print symbol_table.current_scope
# p[4].TAC.print_code()
symbol_table.add_function(p[2].data, p[3].input_type, p[3].children)
if len(p) == 5:
noOfParams = 0
for f in symbol_table.symbol_table[symbol_table.current_scope]['functions']:
if f.name == p[2].data:
noOfParams = len(f.parameters)
p[0].TAC.add_line(['func', check_variable(p[2]), str(noOfParams), ''])
for child in reversed(p[3].children):
p[0].TAC.add_line(['getparam', p[4].data + '_' + child.data, '', ''])
p[0].TAC.add_line(['stack_push', '', '', ''])
p[0].TAC.append_TAC(p[4].TAC)
return
def p_FunctionName(p):
'''FunctionName : IDENTIFIER
'''
parsed.append(p.slice)
p[0] = TreeNode('FunctionName', p[1], 'INT', 1)
return
def p_FunctionBody(p):
'''FunctionBody : Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'FunctionBody'
return
def p_SimpleStmt(p):
'''SimpleStmt : Expression
| Assignment
| ShortVarDecl
| IncDecStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'SimpleStmt'
return
def p_IncDecStmt(p):
'''IncDecStmt : Expression PLUS_PLUS
| Expression MINUS_MINUS
'''
parsed.append(p.slice)
one_val = TreeNode('IncDecStmt', '1', 'INT')
p[0] = p[1]
if p[1].isLvalue == 1:
if p[2] == '++':
p[0].TAC.add_line(['+', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
p[0].TAC.add_line(['-', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
print_error("Lvalue required")
p[0].name = 'IncDecStmt'
return
def p_ShortVarDecl(p):
'''ShortVarDecl : ExpressionList ASSIGN_OP ExpressionList
| Expression ASSIGN_OP Expression
'''
parsed.append(p.slice)
# TODO: Add in symbol table
p[0] = TreeNode('ShortVarDecl', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1].children[i]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.add_line([p[2], check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2], check_variable(p[1]), check_variable(p[3]), ''])
return
def p_Assignment(p):
'''Assignment : ExpressionList assign_op ExpressionList
| Expression assign_op Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('Assignment', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].children[i].data) == False and p[1].children[i].data not in generated['temp']:
print_error("Variable " + p[1].children[i].data + " is undefined")
return
if p[3].children[i].isLvalue == 1 and symbol_table.search_identifier(p[3].children[i].data) == False and p[3].children[i].data not in generated['temp']:
print_error("Variable " + p[3].children[i].data + " is undefined")
return
p[0].TAC.add_line([p[2].data, check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].data) == False and p[1].data not in generated['temp']:
print_error("Variable " + p[1].data + " is undefined")
return
if p[3].isLvalue == 1 and symbol_table.search_identifier(p[3].data) == False and p[3].data not in generated['temp']:
print_error("Variable " + p[3].data + " is undefined")
return
# print symbol_table.current_scope
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2].data, check_variable(p[1]), check_variable(p[3]), ''])
return
def p_assign_op(p):
'''assign_op : EQ
| PLUS_EQ
| MINUS_EQ
| OR_EQ
| CARET_EQ
| STAR_EQ
| DIVIDE_EQ
| MODULO_EQ
| LS_EQ
| RS_EQ
| AMP_EQ
| AND_OR_EQ
'''
parsed.append(p.slice)
p[0] = TreeNode('assign_op', p[1], 'OPERATOR')
return
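# if/else lowering: 'ifgotoeq <cond> 0 <label>' skips the then-block when the
# condition is false, with an extra 'goto'/label pair to jump over the else branch.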
def p_IfStmt(p):
'''IfStmt : IF Expression Block
| IF Expression Block ELSE elseTail
'''
parsed.append(p.slice)
if len(p) == 4:
l1 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
if len(p) == 6:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[5].TAC)
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_elseTail(p):
'''elseTail : IfStmt
| Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'elseTail'
return
def p_SwitchStmt(p):
'''SwitchStmt : ExprSwitchStmt
'''
parsed.append(p.slice)
p[0] = TreeNode('SwitchStmt', 0, 'INT', 0, [], p[1].TAC)
return
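# Switch lowering (only the 'SWITCH Expression { ... }' form is handled here): the
# switch expression is copied into a temporary, 'ifgotoeq' lines dispatch to the
# per-case labels collected from ExprCaseClauseList, and case bodies jump to a
# common exit label.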
def p_ExprSwitchStmt(p):
'''ExprSwitchStmt : SWITCH SimpleStmt SEMICOLON LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH SimpleStmt SEMICOLON Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
'''
parsed.append(p.slice)
if len(p) == 8:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('ExprSwitchStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
t1 = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line(['=', check_variable(t1) , check_variable(p[2]), ''])
p[0].TAC.append_TAC(p[5].data)
for i in range(len(p[5].children)):
p[0].TAC.add_line(['ifgotoeq', check_variable(t1), p[5].children[i][0], p[5].children[i][1]])
p[0].TAC.add_line(['goto', l2, '', ''])
for i in range(p[5].TAC.length()):
if i in p[5].TAC.leaders[1:]:
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(p[5].TAC.code[i])
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_ExprCaseClauseList(p):
'''ExprCaseClauseList : empty
| ExprCaseClauseList ExprCaseClause
'''
parsed.append(p.slice)
TAC1 = ThreeAddressCode()
TAC2 = ThreeAddressCode()
if len(p) == 3:
TAC1 = p[1].data
TAC2 = p[2].data
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT', 0, p[1].children + p[2].children, p[1].TAC)
p[0].TAC.add_leader(p[0].TAC.length())
p[0].TAC.append_TAC(p[2].TAC)
p[0].data.append_TAC(TAC2)
else:
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT')
return
def p_ExprCaseClause(p):
'''ExprCaseClause : ExprSwitchCase COLON StatementList
'''
parsed.append(p.slice)
l1 = gen('label')
p[0] = TreeNode('ExprCaseClause', 0, 'INT')
# p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
# p[0].TAC.add_line(['ifgotoneq', p[1].children, p[1].children, l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].children = [[p[1].data,l1]]
p[0].data = p[1].TAC
return
def p_ExprSwitchCase(p):
'''ExprSwitchCase : CASE ExpressionList
| DEFAULT
| CASE Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('ExprSwitchCase', 0, 'INT')
if len(p) == 3:
p[0].data = p[2].data
p[0].TAC = p[2].TAC
return
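# for-loop lowering: a start label, an 'ifgotoeq cond 0 exit' test, the body, and a
# back-edge 'goto'; the condition-less 'FOR Block' form becomes an infinite loop.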
def p_ForStmt(p):
'''ForStmt : FOR Expression Block
| FOR Block
'''
parsed.append(p.slice)
p[0] = TreeNode('ForStmt', 0, 'INT')
if len(p) == 4:
l1 = gen('label')
l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq',check_variable(p[2]), '0', l2])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
p[0].TAC.add_line(['label', l2, '', ''])
if len(p) == 3:
l1 = gen('label')
# l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
# p[0].TAC.add_line([l2])
return
def p_ReturnStmt(p):
'''ReturnStmt : RETURN
| RETURN Expression
| RETURN ExpressionList
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ReturnStmt', 0, 'None')
p[0].TAC.add_line(['return', '', '', ''])
if len(p) == 3:
if p[2].name == 'Expression':
p[0] = p[2]
p[0].name = 'ReturnStmt'
p[0].TAC.add_line(['return', check_variable(p[2]), '', ''])
return
def p_BreakStmt(p):
'''BreakStmt : BREAK IDENTIFIER
'''
parsed.append(p.slice)
return
def p_ContinueStmt(p):
'''ContinueStmt : CONTINUE IDENTIFIER
'''
parsed.append(p.slice)
return
def p_GotoStmt(p):
'''GotoStmt : GOTO IDENTIFIER
'''
parsed.append(p.slice)
return
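# Binary expressions allocate a fresh temporary and emit a single TAC line of the
# form [op, dest, lhs, rhs]; unary expressions emit [op, dest, operand, ''].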
def p_Expression(p):
'''Expression : UnaryExpr
| Expression OR_OR Expression
| Expression AMP_AMP Expression
| Expression EQ_EQ Expression
| Expression NOT_EQ Expression
| Expression LT Expression
| Expression LT_EQ Expression
| Expression GT Expression
| Expression GT_EQ Expression
| Expression PLUS Expression
| Expression MINUS Expression
| Expression OR Expression
| Expression CARET Expression
| Expression STAR Expression
| Expression DIVIDE Expression
| Expression MODULO Expression
| Expression LS Expression
| Expression RS Expression
| Expression AMP Expression
| Expression AND_OR Expression
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1, [], p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line([p[2],check_variable(p[0]), check_variable(p[1]), check_variable(p[3])])
p[0].name = 'Expression'
return
def p_UnaryExpr(p):
'''UnaryExpr : PrimaryExpr
| unary_op UnaryExpr
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line([check_variable(p[1]), check_variable(p[0]), check_variable(p[2]), ''])
p[0].name = 'UnaryExpr'
return
def p_unary_op(p):
'''unary_op : PLUS
| MINUS
| NOT
| CARET
| STAR
| AMP
| LT_MINUS
'''
parsed.append(p.slice)
p[0] = TreeNode('unary_op', p[1], 'OPERATOR')
return
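# Calls (PrimaryExpr Arguments) push each argument with 'putparam', check the arity
# against the symbol-table entry, then emit 'call' and read the result back with
# 'return_value' into a fresh temporary.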
def p_PrimaryExpr(p):
'''PrimaryExpr : Operand
| IDENTIFIER
| PrimaryExpr Selector
| PrimaryExpr Index
| PrimaryExpr Arguments
'''
parsed.append(p.slice)
if len(p) == 2:
if p.slice[1].type == 'IDENTIFIER':
p[0] = TreeNode('IDENTIFIER', p[1], 'INT', 1)
elif p[1].name == 'Operand':
p[0] = p[1]
elif len(p) == 3:
if p[2].name == 'Index':
p[0] = TreeNode('IDENTIFIER', p[1].data, 'INT', 1, p[2].data)
elif p[2].name == 'Arguments':
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.append_TAC(p[2].TAC)
# p[1].print_node()
func = check_variable(p[1]).split("_")
scope, funcName = "_".join(func[:2]), "_".join(func[2:])
temp = 0
for f in symbol_table.symbol_table[scope]['functions']:
if f.name == funcName:
temp = len(f.parameters)
# p[2].print_node()
for child in p[2].children:
p[0].TAC.add_line(['putparam', check_variable(child), '', ''])
if temp != p[2].data:
print_error('Function ' + funcName + ' requires ' + str(temp) + ' parameters but ' + str(p[2].data) + ' supplied')
p[0].TAC.add_line(['call', check_variable(p[1]), str(p[2].data), ''])
p[0].TAC.add_line(['return_value', check_variable(p[0]), '', ''])
p[0].name = 'PrimaryExpr'
return
def p_Operand(p):
'''Operand : Literal
| LROUND Expression RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Operand'
return
def p_Literal(p):
'''Literal : BasicLit
| FunctionLit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Literal'
return
def p_BasicLit(p):
'''BasicLit : int_lit
| float_lit
| string_lit
| rune_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'BasicLit'
return
def p_int_lit(p):
'''int_lit : decimal_lit
| octal_lit
| hex_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'int_lit'
return
def p_decimal_lit(p):
'''decimal_lit : DECIMAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('decimal_lit', p[1], 'INT')
return
def p_octal_lit(p):
'''octal_lit : OCTAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('octal_lit', p[1], 'OCT')
return
def p_hex_lit(p):
'''hex_lit : HEX_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('hex_lit', p[1], 'HEX')
return
def p_float_lit(p):
'''float_lit : FLOAT_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('float_lit', p[1], 'FLOAT')
return
def p_FunctionLit(p):
'''FunctionLit : FUNC Signature FunctionBody
'''
parsed.append(p.slice)
# Anonymous Function
# Not implemented yet
return
def p_Selector(p):
'''Selector : DOT IDENTIFIER
'''
parsed.append(p.slice)
return
def p_Index(p):
'''Index : LSQUARE Expression RSQUARE
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'Index'
return
def p_Arguments(p):
'''Arguments : LROUND RROUND
| LROUND ExpressionList RROUND
| LROUND Expression RROUND
| LROUND Type RROUND
| LROUND Type COMMA ExpressionList RROUND
| LROUND Type COMMA Expression RROUND
'''
# print p.slice
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Arguments', 0, 'None')
if len(p) == 4:
if p[2].name == 'Expression':
p[0] = TreeNode('Arguments', 1, 'None', 0, [p[2]], p[2].TAC)
if p[2].name == 'ExpressionList':
p[0] = p[2]
p[0].name = 'Arguments'
p[0].data = len(p[2].children)
return
def p_string_lit(p):
'''string_lit : STRING_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('string_lit', p[1], 'STRING')
return
def p_rune_lit(p):
'''rune_lit : RUNE_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('rune_lit', p[1], 'RUNE')
return
def p_empty(p):
'empty :'
pass
def p_error(p):
print p
if p == None:
print str(sys.argv[1]) + " :: You missed something at the end"
else:
print str(sys.argv[1]) + " :: Syntax error in line no " + str(p.lineno)
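# Driver: build the LALR tables (logging to parselog.txt), read the source file named
# on the command line, and parse it; TAC conversion and assembly generation happen
# inside p_SourceFile once the whole file has been reduced.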
# Standard Logger
logging.basicConfig(
level = logging.DEBUG,
filename = "parselog.txt",
filemode = "w",
format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
yacc.yacc(debug=True, debuglog=log)
input_file = sys.argv[1]
import os
if not os.path.isfile(input_file):
print('Input file ' + input_file + ' does not exist')
sys.exit(1)
input_code = open(input_file, 'r').read()
if input_code[-1] != '\n':
input_code += '\n'
yacc.parse(input_code, debug=log, tracking=True) | en | 0.275883 | #!/usr/bin/python # return 2 values. first is the name for the variable, second is 0 if variable not found # TreeNode.print_node() # symbol_table.print_symbol_table() SourceFile : PACKAGE IDENTIFIER SEMICOLON ImportDeclList TopLevelDeclList # TODO: Ignoring package name and Imports for now # p[0].TAC.print_code() # three_addr_code.print_code() # symbol_table.print_symbol_table() ImportDeclList : ImportDecl SEMICOLON ImportDeclList | empty # TODO: Ignoring Imports for now TopLevelDeclList : TopLevelDecl SEMICOLON TopLevelDeclList | empty TopLevelDecl : Declaration | FunctionDecl ImportDecl : IMPORT LROUND ImportSpecList RROUND | IMPORT ImportSpec # TODO: Ignoring Imports for now ImportSpecList : ImportSpec SEMICOLON ImportSpecList | empty # TODO: Ignoring Imports for now ImportSpec : DOT string_lit | IDENTIFIER string_lit | empty string_lit # TODO: Ignoring Imports for now Block : LCURLY ScopeStart StatementList ScopeEnd RCURLY ScopeStart : empty ScopeEnd : empty StatementList : Statement SEMICOLON StatementList | empty Statement : Declaration | SimpleStmt | ReturnStmt | Block | IfStmt | SwitchStmt | ForStmt | BreakStmt | ContinueStmt | GotoStmt | PrintIntStmt | PrintStrStmt PrintIntStmt : PRINTLN LROUND IDENTIFIER RROUND | PRINTLN LROUND int_lit RROUND # p[0].isLvalue = 0 PrintStrStmt : PRINTLN LROUND string_lit RROUND Declaration : ConstDecl | TypeDecl | VarDecl ConstDecl : CONST LROUND ConstSpecList RROUND | CONST ConstSpec ConstSpecList : empty | ConstSpecList ConstSpec SEMICOLON ConstSpec : IDENTIFIER | IdentifierList | IDENTIFIER EQ Expression | IdentifierList EQ ExpressionList | IDENTIFIER Type EQ Expression | IdentifierList Type EQ ExpressionList IdentifierList : IDENTIFIER COMMA IdentifierBotList IdentifierBotList : IDENTIFIER COMMA IdentifierBotList | IDENTIFIER ExpressionList : Expression COMMA ExpressionBotList ExpressionBotList : Expression COMMA ExpressionBotList | Expression TypeDecl : TYPE TypeSpecTopList TypeSpecTopList : TypeSpec | LROUND TypeSpecList RROUND TypeSpecList : empty | TypeSpecList TypeSpec SEMICOLON TypeSpec : AliasDecl | TypeDef AliasDecl : IDENTIFIER EQ Type TypeDef : IDENTIFIER Type Type : TypeLit | StandardTypes | LROUND Type RROUND StandardTypes : PREDEFINED_TYPES TypeLit : ArrayType | StructType | FunctionType | PointerType PointerType : STAR Type ArrayType : LSQUARE ArrayLength RSQUARE Type ArrayLength : Expression StructType : STRUCT LCURLY FieldDeclList RCURLY FieldDeclList : empty | FieldDeclList FieldDecl SEMICOLON FieldDecl : IdentifierList Type TagTop | IDENTIFIER Type TagTop TagTop : empty | Tag Tag : string_lit FunctionType : FUNC Signature Signature : Parameters | Parameters Result # symbol_table.print_symbol_table() Result : Parameters | Type Parameters : LROUND RROUND | LROUND ParameterList RROUND ParameterList : ParameterDecl | ParameterList COMMA ParameterDecl ParameterDecl : IdentifierList Type | IDENTIFIER Type | Type VarDecl : VAR VarSpecTopList VarSpecTopList : VarSpec | LROUND VarSpecList RROUND VarSpecList : empty | VarSpecList VarSpec SEMICOLON VarSpec : IDENTIFIER Type | IDENTIFIER EQ Expression | IDENTIFIER Type EQ Expression | IdentifierList Type | IdentifierList EQ ExpressionList | IdentifierList Type EQ ExpressionList # Insert into symbol table # l1 = len(p[1].children) # if len(p) == 3: # expr_list = TreeNode('Expr_List', 0, 'NONE', 0, [zero_val] * l1) # elif len(p) == 4: # expr_list = p[3] # elif len(p) == 5: # expr_list = p[4] # l2 = len(expr_list.children) # 
p[0].TAC.append_TAC(expr_list.TAC) # p[0].TAC.append_TAC(p[1].TAC) # if l1 == l2: # for i in range(l1): # p[0].TAC.add_line(['=', p[1].children[i], expr_list.children[i].data, '']) # else: # print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)") # array case # p[2].print_node() # expr = TreeNode('Expr', 0, 'NONE') # if len(p) == 4: # expr = p[3] # p[0].TAC.append_TAC(p[3].TAC) # p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), '']) # elif len(p) == 5: # expr = p[4] # p[0].TAC.append_TAC(p[4].TAC) # p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), '']) FunctionDecl : FUNC FunctionName Signature | FUNC FunctionName Signature FunctionBody # symbol_table.print_symbol_table() # print symbol_table.current_scope # p[4].TAC.print_code() FunctionName : IDENTIFIER FunctionBody : Block SimpleStmt : Expression | Assignment | ShortVarDecl | IncDecStmt IncDecStmt : Expression PLUS_PLUS | Expression MINUS_MINUS ShortVarDecl : ExpressionList ASSIGN_OP ExpressionList | Expression ASSIGN_OP Expression # TODO: Add in symbol table Assignment : ExpressionList assign_op ExpressionList | Expression assign_op Expression # print symbol_table.current_scope assign_op : EQ | PLUS_EQ | MINUS_EQ | OR_EQ | CARET_EQ | STAR_EQ | DIVIDE_EQ | MODULO_EQ | LS_EQ | RS_EQ | AMP_EQ | AND_OR_EQ IfStmt : IF Expression Block | IF Expression Block ELSE elseTail elseTail : IfStmt | Block SwitchStmt : ExprSwitchStmt ExprSwitchStmt : SWITCH SimpleStmt SEMICOLON LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY | SWITCH SimpleStmt SEMICOLON Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY | SWITCH LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY | SWITCH Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY ExprCaseClauseList : empty | ExprCaseClauseList ExprCaseClause ExprCaseClause : ExprSwitchCase COLON StatementList # p[0].TAC.append_TAC(p[1].TAC) # p[0].TAC.add_line(['ifgotoneq', p[1].children, p[1].children, l1]) ExprSwitchCase : CASE ExpressionList | DEFAULT | CASE Expression ForStmt : FOR Expression Block | FOR Block # l2 = gen('label') # p[0].TAC.add_line([l2]) ReturnStmt : RETURN | RETURN Expression | RETURN ExpressionList BreakStmt : BREAK IDENTIFIER ContinueStmt : CONTINUE IDENTIFIER GotoStmt : GOTO IDENTIFIER Expression : UnaryExpr | Expression OR_OR Expression | Expression AMP_AMP Expression | Expression EQ_EQ Expression | Expression NOT_EQ Expression | Expression LT Expression | Expression LT_EQ Expression | Expression GT Expression | Expression GT_EQ Expression | Expression PLUS Expression | Expression MINUS Expression | Expression OR Expression | Expression CARET Expression | Expression STAR Expression | Expression DIVIDE Expression | Expression MODULO Expression | Expression LS Expression | Expression RS Expression | Expression AMP Expression | Expression AND_OR Expression UnaryExpr : PrimaryExpr | unary_op UnaryExpr unary_op : PLUS | MINUS | NOT | CARET | STAR | AMP | LT_MINUS PrimaryExpr : Operand | IDENTIFIER | PrimaryExpr Selector | PrimaryExpr Index | PrimaryExpr Arguments # p[1].print_node() # p[2].print_node() Operand : Literal | LROUND Expression RROUND Literal : BasicLit | FunctionLit BasicLit : int_lit | float_lit | string_lit | rune_lit int_lit : decimal_lit | octal_lit | hex_lit decimal_lit : DECIMAL_LIT octal_lit : OCTAL_LIT hex_lit : HEX_LIT float_lit : FLOAT_LIT FunctionLit : FUNC Signature FunctionBody # Anonymous Function # Not implemented yet Selector : DOT IDENTIFIER Index : LSQUARE 
Expression RSQUARE Arguments : LROUND RROUND | LROUND ExpressionList RROUND | LROUND Expression RROUND | LROUND Type RROUND | LROUND Type COMMA ExpressionList RROUND | LROUND Type COMMA Expression RROUND # print p.slice string_lit : STRING_LIT rune_lit : RUNE_LIT # Standard Logger | 2.680178 | 3 |
render_video.py | frostburn/branch-cut-mandelbrot | 0 | 478 | <gh_stars>0
import argparse
import imageio
import progressbar
from _routines import ffi, lib
from pylab import *
from random import Random
RESOLUTIONS = {
"2160p": (3840, 2160),
"1440p": (2560, 1440),
"1080p": (1920, 1080),
"720p": (1280, 720),
"480p": (854, 480),
"360p": (640, 360),
"240p": (426, 240),
"160p": (284, 160),
"80p": (142, 80),
"40p": (71, 40),
}
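# Convert per-channel float arrays in [0, 1] into an 8-bit RGB frame, optionally
# adding a small amount of noise to dither away banding and transposing
# 'ij'-indexed grids into image orientation.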
def make_video_frame(rgb, indexing='ij', dither=1.0/256.0):
if dither:
rgb = [channel + random(channel.shape)*dither for channel in rgb]
if indexing == 'ij':
rgb = [channel.T for channel in rgb]
frame = stack(rgb, axis=-1)
frame = clip(frame, 0.0, 1.0)
return (frame * 255).astype('uint8')
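# Render every frame: fill im_buf through the cffi-wrapped C mandelbrot routine,
# treat negative values as background, and map the rest onto an RGB palette before
# appending the frame to the video writer.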
def do_render(args, writer):
max_iter = 32
im_buf = ffi.new("double[]", args.width * args.height)
cut_buf = ffi.new("double[]", max_iter)
fixed_seed = Random(1)
for i in range(max_iter):
cut_buf[i] = i*fixed_seed.random()
for n in progressbar.progressbar(range(args.num_frames)):
tg = n / (args.num_frames - 1)
t = tg
lib.mandelbrot(im_buf, args.width, args.height, 0.7, 0.8, 3.5, t-20, cut_buf, max_iter)
im = array(list(im_buf)).reshape(args.height, args.width)
# for i in range(max_iter):
# cut_buf[i] *= 0.05**args.dt
bg = (im < 0)
im /= im.max()
fg = 1 - bg
red = im
green = 1 - im
blue = 4*im*(1-im)
blue = blue + 0.2*green
red = 0.1 + 0.8*red + green**3
green = 0.2 + 0.21*green
frame = make_video_frame([red*fg + 0.15*bg, green*fg + 0.08*bg, blue*fg + 0.1*bg], indexing=None)
writer.append_data(frame)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Render audio samples')
parser.add_argument('outfile', type=str, help='Output file name')
parser.add_argument('--params', type=str, help='Parameter YAML file name')
parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
parser.add_argument('--framerate', type=int, help='Video frame rate')
parser.add_argument('--video-quality', type=int, help='Video quality factor')
parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
args = parser.parse_args()
if not args.framerate:
args.framerate = 24
if not args.video_quality:
args.video_quality = 10
writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)
# Compute derived parameters
if args.resolution:
width, height = RESOLUTIONS[args.resolution]
if not args.width:
args.width = width
if not args.height:
args.height = height
if (not args.width) or (not args.height):
raise ValueError("Invalid or missing resolution")
if not args.video_duration:
raise ValueError("Missing video duration")
args.aspect = args.width / args.height
args.num_frames = int(args.video_duration * args.framerate)
args.dt = 1.0 / args.num_frames
do_render(args, writer)
writer.close()
| import argparse
import imageio
import progressbar
from _routines import ffi, lib
from pylab import *
from random import Random
RESOLUTIONS = {
"2160p": (3840, 2160),
"1440p": (2560, 1440),
"1080p": (1920, 1080),
"720p": (1280, 720),
"480p": (854, 480),
"360p": (640, 360),
"240p": (426, 240),
"160p": (284, 160),
"80p": (142, 80),
"40p": (71, 40),
}
def make_video_frame(rgb, indexing='ij', dither=1.0/256.0):
if dither:
rgb = [channel + random(channel.shape)*dither for channel in rgb]
if indexing == 'ij':
rgb = [channel.T for channel in rgb]
frame = stack(rgb, axis=-1)
frame = clip(frame, 0.0, 1.0)
return (frame * 255).astype('uint8')
def do_render(args, writer):
max_iter = 32
im_buf = ffi.new("double[]", args.width * args.height)
cut_buf = ffi.new("double[]", max_iter)
fixed_seed = Random(1)
for i in range(max_iter):
cut_buf[i] = i*fixed_seed.random()
for n in progressbar.progressbar(range(args.num_frames)):
tg = n / (args.num_frames - 1)
t = tg
lib.mandelbrot(im_buf, args.width, args.height, 0.7, 0.8, 3.5, t-20, cut_buf, max_iter)
im = array(list(im_buf)).reshape(args.height, args.width)
# for i in range(max_iter):
# cut_buf[i] *= 0.05**args.dt
bg = (im < 0)
im /= im.max()
fg = 1 - bg
red = im
green = 1 - im
blue = 4*im*(1-im)
blue = blue + 0.2*green
red = 0.1 + 0.8*red + green**3
green = 0.2 + 0.21*green
frame = make_video_frame([red*fg + 0.15*bg, green*fg + 0.08*bg, blue*fg + 0.1*bg], indexing=None)
writer.append_data(frame)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Render audio samples')
parser.add_argument('outfile', type=str, help='Output file name')
parser.add_argument('--params', type=str, help='Parameter YAML file name')
parser.add_argument('--resolution', choices=RESOLUTIONS.keys(), help='Video and simulation grid resolution')
parser.add_argument('--width', type=int, help='Video and simulation grid width', metavar='W')
parser.add_argument('--height', type=int, help='Video and simulation grid height', metavar='H')
parser.add_argument('--framerate', type=int, help='Video frame rate')
parser.add_argument('--video-quality', type=int, help='Video quality factor')
parser.add_argument('--video-duration', type=float, help='Duration of video to render in seconds')
args = parser.parse_args()
if not args.framerate:
args.framerate = 24
if not args.video_quality:
args.video_quality = 10
writer = imageio.get_writer(args.outfile, fps=args.framerate, quality=args.video_quality, macro_block_size=1)
# Compute derived parameters
if args.resolution:
width, height = RESOLUTIONS[args.resolution]
if not args.width:
args.width = width
if not args.height:
args.height = height
if (not args.width) or (not args.height):
raise ValueError("Invalid or missing resolution")
if not args.video_duration:
raise ValueError("Missing video duration")
args.aspect = args.width / args.height
args.num_frames = int(args.video_duration * args.framerate)
args.dt = 1.0 / args.num_frames
do_render(args, writer)
writer.close() | en | 0.418657 | # for i in range(max_iter): # cut_buf[i] *= 0.05**args.dt # Compute derived parameters | 2.470915 | 2 |
tests/mqtt/test_subscribe.py | smurfix/hbmqtt | 0 | 479 | <filename>tests/mqtt/test_subscribe.py
# Copyright (c) 2015 <NAME>
#
# See the file license.txt for copying permission.
import anyio
import unittest
from hbmqtt.mqtt.subscribe import SubscribePacket, SubscribePayload
from hbmqtt.mqtt.packet import PacketIdVariableHeader
from hbmqtt.mqtt.constants import QOS_1, QOS_2
from hbmqtt.adapters import BufferAdapter
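# Round-trip tests for the MQTT SUBSCRIBE packet: decode one from raw bytes and
# re-encode one built from a packet id plus (topic, QoS) pairs.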
class SubscribePacketTest(unittest.TestCase):
def test_from_stream(self):
data = b'\x80\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02'
stream = BufferAdapter(data)
message = anyio.run(SubscribePacket.from_stream, stream)
(topic, qos) = message.payload.topics[0]
self.assertEqual(topic, 'a/b')
self.assertEqual(qos, QOS_1)
(topic, qos) = message.payload.topics[1]
self.assertEqual(topic, 'c/d')
self.assertEqual(qos, QOS_2)
def test_to_stream(self):
variable_header = PacketIdVariableHeader(10)
payload = SubscribePayload(
[
('a/b', QOS_1),
('c/d', QOS_2)
])
publish = SubscribePacket(variable_header=variable_header, payload=payload)
out = publish.to_bytes()
self.assertEqual(out, b'\x82\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02')
| <filename>tests/mqtt/test_subscribe.py
# Copyright (c) 2015 <NAME>
#
# See the file license.txt for copying permission.
import anyio
import unittest
from hbmqtt.mqtt.subscribe import SubscribePacket, SubscribePayload
from hbmqtt.mqtt.packet import PacketIdVariableHeader
from hbmqtt.mqtt.constants import QOS_1, QOS_2
from hbmqtt.adapters import BufferAdapter
class SubscribePacketTest(unittest.TestCase):
def test_from_stream(self):
data = b'\x80\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02'
stream = BufferAdapter(data)
message = anyio.run(SubscribePacket.from_stream, stream)
(topic, qos) = message.payload.topics[0]
self.assertEqual(topic, 'a/b')
self.assertEqual(qos, QOS_1)
(topic, qos) = message.payload.topics[1]
self.assertEqual(topic, 'c/d')
self.assertEqual(qos, QOS_2)
def test_to_stream(self):
variable_header = PacketIdVariableHeader(10)
payload = SubscribePayload(
[
('a/b', QOS_1),
('c/d', QOS_2)
])
publish = SubscribePacket(variable_header=variable_header, payload=payload)
out = publish.to_bytes()
self.assertEqual(out, b'\x82\x0e\x00\x0a\x00\x03a/b\x01\x00\x03c/d\x02')
| en | 0.766723 | # Copyright (c) 2015 <NAME> # # See the file license.txt for copying permission. | 2.358265 | 2 |
examples/cmrc2018_example/main.trainer.py | fangd123/TextBrewer | 1,121 | 480 | <filename>examples/cmrc2018_example/main.trainer.py<gh_stars>1000+
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from processing import convert_examples_to_features, read_squad_examples
from processing import ChineseFullTokenizer
from pytorch_pretrained_bert.my_modeling import BertConfig
from optimization import BERTAdam
import config
from utils import read_and_convert, divide_parameters
from modeling import BertForQASimple, BertForQASimpleAdaptorTraining
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from functools import partial
from train_eval import predict
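# Validate command-line arguments and select the torch device / GPU count,
# initialising distributed NCCL when a local_rank is supplied.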
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory () already exists and is not empty.")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
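# main(): parse the config, fix random seeds, build CMRC2018 features, load the
# student BERT, then either train it with TextBrewer's BasicTrainer (evaluating via
# the predict callback) or run prediction only.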
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
#load bert config
bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
assert args.max_seq_length <= bert_config_S.max_position_embeddings
#read data
train_examples = None
train_features = None
eval_examples = None
eval_features = None
num_train_steps = None
tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
convert_fn = partial(convert_examples_to_features,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length)
if args.do_train:
train_examples,train_features = read_and_convert(args.train_file,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
if args.fake_file_1:
fake_examples1,fake_features1 = read_and_convert(args.fake_file_1,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples1
train_features += fake_features1
if args.fake_file_2:
fake_examples2, fake_features2 = read_and_convert(args.fake_file_2,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples2
train_features += fake_features2
num_train_steps = int(len(train_features)/args.train_batch_size) * args.num_train_epochs
if args.do_predict:
eval_examples,eval_features = read_and_convert(args.predict_file,is_training=False, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
#Build Model and load checkpoint
model_S = BertForQASimple(bert_config_S,args)
#Load student
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
assert len(missing_keys)==0
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
else:
logger.info("Model is randomly initialized.")
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,
warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,
s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
########### DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device)
distiller = BasicTrainer(train_config = train_config,
model = model_S,
adaptor = BertForQASimpleAdaptorTraining)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.float)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_dataset = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_doc_mask,
all_start_positions, all_end_positions)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
callback_func = partial(predict,
eval_examples=eval_examples,
eval_features=eval_features,
args=args)
with distiller:
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = predict(model_S,eval_examples,eval_features,step=0,args=args)
print (res)
if __name__ == "__main__":
main()
| <filename>examples/cmrc2018_example/main.trainer.py<gh_stars>1000+
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from processing import convert_examples_to_features, read_squad_examples
from processing import ChineseFullTokenizer
from pytorch_pretrained_bert.my_modeling import BertConfig
from optimization import BERTAdam
import config
from utils import read_and_convert, divide_parameters
from modeling import BertForQASimple, BertForQASimpleAdaptorTraining
from textbrewer import DistillationConfig, TrainingConfig, BasicTrainer
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from functools import partial
from train_eval import predict
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory () already exists and is not empty.")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
#load bert config
bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
assert args.max_seq_length <= bert_config_S.max_position_embeddings
#read data
train_examples = None
train_features = None
eval_examples = None
eval_features = None
num_train_steps = None
tokenizer = ChineseFullTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
convert_fn = partial(convert_examples_to_features,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length)
if args.do_train:
train_examples,train_features = read_and_convert(args.train_file,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
if args.fake_file_1:
fake_examples1,fake_features1 = read_and_convert(args.fake_file_1,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples1
train_features += fake_features1
if args.fake_file_2:
fake_examples2, fake_features2 = read_and_convert(args.fake_file_2,is_training=True, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
train_examples += fake_examples2
train_features += fake_features2
num_train_steps = int(len(train_features)/args.train_batch_size) * args.num_train_epochs
if args.do_predict:
eval_examples,eval_features = read_and_convert(args.predict_file,is_training=False, do_lower_case=args.do_lower_case,
read_fn=read_squad_examples,convert_fn=convert_fn)
#Build Model and load checkpoint
model_S = BertForQASimple(bert_config_S,args)
#Load student
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
assert len(missing_keys)==0
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
else:
logger.info("Model is randomly initialized.")
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,
warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,
s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)
logger.info("***** Running training *****")
logger.info(" Num orig examples = %d", len(train_examples))
logger.info(" Num split examples = %d", len(train_features))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
########### DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device)
distiller = BasicTrainer(train_config = train_config,
model = model_S,
adaptor = BertForQASimpleAdaptorTraining)
all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
all_doc_mask = torch.tensor([f.doc_mask for f in train_features], dtype=torch.float)
all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_position for f in train_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in train_features], dtype=torch.long)
train_dataset = TensorDataset(all_input_ids, all_segment_ids, all_input_mask, all_doc_mask,
all_start_positions, all_end_positions)
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
callback_func = partial(predict,
eval_examples=eval_examples,
eval_features=eval_features,
args=args)
with distiller:
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = predict(model_S,eval_examples,eval_features,step=0,args=args)
        print(res)
if __name__ == "__main__":
main()
| en | 0.166665 | #parse arguments #set seeds #arguments check #load bert config #read data #Build Model and load checkpoint #Load student #,output_device=n_gpu-1) #parameters ########### DISTILLATION ########### | 2.198078 | 2 |
gym/gym/benchmarks/__init__.py | youngwoon/DnC-RL-Tensorflow | 9 | 481 | <reponame>youngwoon/DnC-RL-Tensorflow<gh_stars>1-10
# EXPERIMENTAL: all may be removed soon
from gym.benchmarks import scoring
from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view # imports used elsewhere
register_benchmark(
id='Atari200M',
scorer=scoring.TotalReward(),
name='Atari200M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
},
])
register_benchmark(
id='Atari40M',
scorer=scoring.TotalReward(),
name='Atari40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
}
])
register_benchmark(
id='AtariExploration40M',
scorer=scoring.TotalReward(),
name='AtariExploration40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'FreewayNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.1,
'reward_ceiling': 31.0,
},
{
'env_id': 'GravitarNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 245.5,
'reward_ceiling': 1000.0,
},
{
'env_id': 'MontezumaRevengeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 25.0,
'reward_ceiling': 10000.0,
},
{
'env_id': 'PitfallNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -348.8,
'reward_ceiling': 1000.0,
},
{
'env_id': 'PrivateEyeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 662.8,
'reward_ceiling': 100.0,
},
{
'env_id': 'SolarisNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 2047.2,
'reward_ceiling': 5000.0,
},
{
'env_id': 'VentureNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 18.0,
'reward_ceiling': 100.0,
}
])
register_benchmark(
id='ClassicControl2-v0',
name='ClassicControl2',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v0',
'trials': 1,
'max_timesteps': 2000,
},
{'env_id': 'Pendulum-v0',
'trials': 1,
'max_timesteps': 1000,
},
])
register_benchmark(
id='ClassicControl-v0',
name='ClassicControl',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': 0.0,
'reward_ceiling': 500.0,
},
{'env_id': 'Acrobot-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MountainCar-v0',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -200.0,
'reward_ceiling': -100.0,
},
{'env_id': 'Pendulum-v0',
'trials': 3,
'max_timesteps': 200000,
'reward_floor': -1400.0,
'reward_ceiling': 0.0,
},
])
### Autogenerated by tinkerbell.benchmark.convert_benchmark.py
register_benchmark(
id='Mujoco10M-v0',
name='Mujoco10M',
view_group="Control",
description='Mujoco benchmark with 10M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'Ant-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Hopper-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Humanoid-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'HumanoidStandup-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Walker2d-v1',
'trials': 1,
'max_timesteps': 1000000,
}
])
register_benchmark(
id='Mujoco1M-v0',
name='Mujoco1M',
view_group="Control",
description='Mujoco benchmark with 1M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'HalfCheetah-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -280.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'Hopper-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 16.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'InvertedDoublePendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 53.0,
'reward_ceiling': 10000.0,
},
{'env_id': 'InvertedPendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 5.6,
'reward_ceiling': 1000.0,
},
{'env_id': 'Reacher-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -43.0,
'reward_ceiling': -0.5,
},
{'env_id': 'Swimmer-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 0.23,
'reward_ceiling': 500.0,
},
{'env_id': 'Walker2d-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 1.6,
'reward_ceiling': 5500.0,
}
])
register_benchmark(
id='MinecraftEasy-v0',
name='MinecraftEasy',
view_group="Minecraft",
description='Minecraft easy benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftBasic-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -2200.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftDefaultFlat1-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MinecraftTrickyArena1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -1000.0,
'reward_ceiling': 2800.0,
},
{'env_id': 'MinecraftEating1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -300.0,
'reward_ceiling': 300.0,
},
])
register_benchmark(
id='MinecraftMedium-v0',
name='MinecraftMedium',
view_group="Minecraft",
description='Minecraft medium benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftCliffWalking1-v0',
'trials': 2,
'max_timesteps': 400000,
'reward_floor': -100.0,
'reward_ceiling': 100.0,
},
{'env_id': 'MinecraftVertical-v0',
'trials': 2,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 8040.0,
},
{'env_id': 'MinecraftMaze1-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftMaze2-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftHard-v0',
name='MinecraftHard',
view_group="Minecraft",
description='Minecraft hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftObstacles-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 2080.0,
},
{'env_id': 'MinecraftSimpleRoomMaze-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 4160.0,
},
{'env_id': 'MinecraftAttic-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1040.0,
},
{'env_id': 'MinecraftComplexityUsage-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftVeryHard-v0',
name='MinecraftVeryHard',
view_group="Minecraft",
description='Minecraft very hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftMedium-v0',
'trials': 2,
'max_timesteps': 1800000,
'reward_floor': -10000.0,
'reward_ceiling': 16280.0,
},
{'env_id': 'MinecraftHard-v0',
'trials': 2,
'max_timesteps': 2400000,
'reward_floor': -10000.0,
'reward_ceiling': 32640.0,
},
])
register_benchmark(
id='MinecraftImpossible-v0',
name='MinecraftImpossible',
view_group="Minecraft",
description='Minecraft impossible benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftDefaultWorld1-v0',
'trials': 2,
'max_timesteps': 6000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
bandit_tasks = []
for n_arms in [5, 10, 50]:
for n_episodes in [10, 100, 500]:
bandit_tasks.append({
'env_id': 'BernoulliBandit-{k}.arms-{n}.episodes-v0'.format(k=n_arms, n=n_episodes),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': n_episodes,
})
register_benchmark(
id='BernoulliBandit-v0',
name='BernoulliBandit',
description='Multi-armed Bernoulli bandits',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=bandit_tasks
)
tabular_mdp_tasks = []
for n_states in [10]:
for n_actions in [5]:
for episode_length in [10]:
for n_episodes in [10, 25, 50, 75, 100]:
tabular_mdp_tasks.append({
'env_id': 'RandomTabularMDP-{s}.states-{a}.actions-{t}.timesteps-{n}.episodes-v0'.format(
s=n_states, a=n_actions, t=episode_length, n=n_episodes,
),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': episode_length * n_episodes * 2,
})
register_benchmark(
id='RandomTabularMDP-v0',
name='RandomTabularMDP',
description='Random tabular MDPs',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=tabular_mdp_tasks
)
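# --- Illustrative usage sketch (editor addition; not part of the original module) ---
# Shows how a registered benchmark could be looked up by id via the `benchmark_spec`
# helper imported above. The attribute names on the returned spec and its tasks
# (`tasks`, `env_id`, `trials`, `max_timesteps`) are assumed from the task dicts
# registered above and may differ between gym versions. The __main__ guard keeps
# normal imports of this module unaffected.
if __name__ == '__main__':
    _spec = benchmark_spec('Mujoco1M-v0')
    for _task in _spec.tasks:
        print(_task.env_id, _task.trials, _task.max_timesteps)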
| # EXPERIMENTAL: all may be removed soon
from gym.benchmarks import scoring
from gym.benchmarks.registration import benchmark_spec, register_benchmark, registry, register_benchmark_view # imports used elsewhere
register_benchmark(
id='Atari200M',
scorer=scoring.TotalReward(),
name='Atari200M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(2e8),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
},
])
register_benchmark(
id='Atari40M',
scorer=scoring.TotalReward(),
name='Atari40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'BeamRiderNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 363.9,
'reward_ceiling': 60000.0,
},
{
'env_id': 'BreakoutNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 1.7,
'reward_ceiling': 800.0,
},
{
'env_id': 'EnduroNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.0,
'reward_ceiling': 5000.0,
},
{
'env_id': 'PongNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -20.7,
'reward_ceiling': 21.0,
},
{
'env_id': 'QbertNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 163.9,
'reward_ceiling': 40000.0,
},
{
'env_id': 'SeaquestNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 68.4,
'reward_ceiling': 100000.0,
},
{
'env_id': 'SpaceInvadersNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 148.0,
'reward_ceiling': 30000.0,
}
])
register_benchmark(
id='AtariExploration40M',
scorer=scoring.TotalReward(),
name='AtariExploration40M',
view_group="Atari",
description='7 Atari games, with pixel observations',
tasks=[
{
'env_id': 'FreewayNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 0.1,
'reward_ceiling': 31.0,
},
{
'env_id': 'GravitarNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 245.5,
'reward_ceiling': 1000.0,
},
{
'env_id': 'MontezumaRevengeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 25.0,
'reward_ceiling': 10000.0,
},
{
'env_id': 'PitfallNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': -348.8,
'reward_ceiling': 1000.0,
},
{
'env_id': 'PrivateEyeNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 662.8,
'reward_ceiling': 100.0,
},
{
'env_id': 'SolarisNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 2047.2,
'reward_ceiling': 5000.0,
},
{
'env_id': 'VentureNoFrameskip-v4',
'trials': 2,
'max_timesteps': int(4e7),
'reward_floor': 18.0,
'reward_ceiling': 100.0,
}
])
register_benchmark(
id='ClassicControl2-v0',
name='ClassicControl2',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v0',
'trials': 1,
'max_timesteps': 2000,
},
{'env_id': 'Pendulum-v0',
'trials': 1,
'max_timesteps': 1000,
},
])
register_benchmark(
id='ClassicControl-v0',
name='ClassicControl',
view_group="Control",
description='Simple classic control benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'CartPole-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': 0.0,
'reward_ceiling': 500.0,
},
{'env_id': 'Acrobot-v1',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MountainCar-v0',
'trials': 3,
'max_timesteps': 100000,
'reward_floor': -200.0,
'reward_ceiling': -100.0,
},
{'env_id': 'Pendulum-v0',
'trials': 3,
'max_timesteps': 200000,
'reward_floor': -1400.0,
'reward_ceiling': 0.0,
},
])
### Autogenerated by tinkerbell.benchmark.convert_benchmark.py
register_benchmark(
id='Mujoco10M-v0',
name='Mujoco10M',
view_group="Control",
description='Mujoco benchmark with 10M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'Ant-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Hopper-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Humanoid-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'HumanoidStandup-v1',
'trials': 1,
'max_timesteps': 1000000,
},
{'env_id': 'Walker2d-v1',
'trials': 1,
'max_timesteps': 1000000,
}
])
register_benchmark(
id='Mujoco1M-v0',
name='Mujoco1M',
view_group="Control",
description='Mujoco benchmark with 1M steps',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'HalfCheetah-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -280.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'Hopper-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 16.0,
'reward_ceiling': 4000.0,
},
{'env_id': 'InvertedDoublePendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 53.0,
'reward_ceiling': 10000.0,
},
{'env_id': 'InvertedPendulum-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 5.6,
'reward_ceiling': 1000.0,
},
{'env_id': 'Reacher-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': -43.0,
'reward_ceiling': -0.5,
},
{'env_id': 'Swimmer-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 0.23,
'reward_ceiling': 500.0,
},
{'env_id': 'Walker2d-v1',
'trials': 3,
'max_timesteps': 1000000,
'reward_floor': 1.6,
'reward_ceiling': 5500.0,
}
])
register_benchmark(
id='MinecraftEasy-v0',
name='MinecraftEasy',
view_group="Minecraft",
description='Minecraft easy benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftBasic-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -2200.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftDefaultFlat1-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -500.0,
'reward_ceiling': 0.0,
},
{'env_id': 'MinecraftTrickyArena1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -1000.0,
'reward_ceiling': 2800.0,
},
{'env_id': 'MinecraftEating1-v0',
'trials': 2,
'max_timesteps': 300000,
'reward_floor': -300.0,
'reward_ceiling': 300.0,
},
])
register_benchmark(
id='MinecraftMedium-v0',
name='MinecraftMedium',
view_group="Minecraft",
description='Minecraft medium benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftCliffWalking1-v0',
'trials': 2,
'max_timesteps': 400000,
'reward_floor': -100.0,
'reward_ceiling': 100.0,
},
{'env_id': 'MinecraftVertical-v0',
'trials': 2,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 8040.0,
},
{'env_id': 'MinecraftMaze1-v0',
'trials': 2,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
{'env_id': 'MinecraftMaze2-v0',
'trials': 2,
'max_timesteps': 2000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftHard-v0',
name='MinecraftHard',
view_group="Minecraft",
description='Minecraft hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftObstacles-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 2080.0,
},
{'env_id': 'MinecraftSimpleRoomMaze-v0',
'trials': 1,
'max_timesteps': 900000,
'reward_floor': -1000.0,
'reward_ceiling': 4160.0,
},
{'env_id': 'MinecraftAttic-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1040.0,
},
{'env_id': 'MinecraftComplexityUsage-v0',
'trials': 1,
'max_timesteps': 600000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
register_benchmark(
id='MinecraftVeryHard-v0',
name='MinecraftVeryHard',
view_group="Minecraft",
description='Minecraft very hard benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftMedium-v0',
'trials': 2,
'max_timesteps': 1800000,
'reward_floor': -10000.0,
'reward_ceiling': 16280.0,
},
{'env_id': 'MinecraftHard-v0',
'trials': 2,
'max_timesteps': 2400000,
'reward_floor': -10000.0,
'reward_ceiling': 32640.0,
},
])
register_benchmark(
id='MinecraftImpossible-v0',
name='MinecraftImpossible',
view_group="Minecraft",
description='Minecraft impossible benchmark',
scorer=scoring.ClipTo01ThenAverage(),
tasks=[
{'env_id': 'MinecraftDefaultWorld1-v0',
'trials': 2,
'max_timesteps': 6000000,
'reward_floor': -1000.0,
'reward_ceiling': 1000.0,
},
])
bandit_tasks = []
for n_arms in [5, 10, 50]:
for n_episodes in [10, 100, 500]:
bandit_tasks.append({
'env_id': 'BernoulliBandit-{k}.arms-{n}.episodes-v0'.format(k=n_arms, n=n_episodes),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': n_episodes,
})
register_benchmark(
id='BernoulliBandit-v0',
name='BernoulliBandit',
description='Multi-armed Bernoulli bandits',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=bandit_tasks
)
tabular_mdp_tasks = []
for n_states in [10]:
for n_actions in [5]:
for episode_length in [10]:
for n_episodes in [10, 25, 50, 75, 100]:
tabular_mdp_tasks.append({
'env_id': 'RandomTabularMDP-{s}.states-{a}.actions-{t}.timesteps-{n}.episodes-v0'.format(
s=n_states, a=n_actions, t=episode_length, n=n_episodes,
),
'trials': 1,
'max_timesteps': 10 ** 9,
'reward_floor': 0,
'reward_ceiling': episode_length * n_episodes * 2,
})
register_benchmark(
id='RandomTabularMDP-v0',
name='RandomTabularMDP',
description='Random tabular MDPs',
scorer=scoring.ClipTo01ThenAverage(num_episodes=1000),
tasks=tabular_mdp_tasks
) | en | 0.639334 | # EXPERIMENTAL: all may be removed soon # imports used elsewhere ### Autogenerated by tinkerbell.benchmark.convert_benchmark.py | 1.631996 | 2 |
hypnettorch/data/timeseries/preprocess_audioset.py | pennfranc/hypnettorch | 31 | 482 | <filename>hypnettorch/data/timeseries/preprocess_audioset.py
#!/usr/bin/env python3
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title :data/timeseries/preprocess_audioset.py
# author :be
# contact :<EMAIL>
# created :31/03/2020
# version :1.0
# python_version :3.7
"""
Script to structure the audioset dataset, which can then be used via
:class:`data.timeseries.audioset_data.AudiosetData`.
The result of this script is available at
https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0
If you want to recreate or modify this dataset, download the audioset data from
https://research.google.com/audioset/download.html
and extract the tar.gz into the following folder:
``datasets/sequential/audioset/audioset_download``.
Subsequently executing this script will create a pickle file containing the 100
class subset of audioset used in this study.
The dataset is stored in tensorflow files. Since we work with pytorch and there
is no utility to read tensorflow files, we extract the data and save them as
numpy arrays in a pickle file.
Furthermore the data are preprocessed to fit our continual learning experiments.
The original dataset provides three subsets with different compositions of
samples and classes. Since we only work with a subset of classes and samples,
we load all available data and then filter and structure them according to our
criteria.
We use the same criteria as Kemker et al. Classes and samples are restricted in
the following way:
Classes:
- no restriction according to ontology file (parsed from ontology.json)
- no parent / child relationship (parsed from ontology.json)
- confidence level > 70% (data was copied from website into txt file)
- number of samples: we only take classes that have more samples than
a certain threshold
Samples:
- since samples can have multiple labels, we only use samples which
only belong to one of the classes we use
- we exclude samples that don't have the full length of 10 seconds
The chosen classes and samples are then split into train and test data and
saved to a pickle file.
"""
import numpy as np
import pickle
import tensorflow as tf
import os
import json
from warnings import warn
warn('The script was created for one time usage and has to be adapted when ' +
'reusing it. All paths specified here are absolute.')
# Tensorflow eager mode needs to be enabled for dataset mapping to work!
tf.enable_eager_execution()
# Set paths and parameters
data_dir = '../../datasets/sequential/audioset/'
download_dir = os.path.join(data_dir,'audioset_download')
fpath_conf_data = os.path.join(data_dir, 'confidence_data.csv')
fpath_label_inds = os.path.join(data_dir, 'class_labels_indices.csv')
fpath_ontology = os.path.join(data_dir, 'ontology.json')
target_path = os.path.join(data_dir, 'audioset_data_balanced.pickle')
n_classes = 100
n_sample = 1000
test_frac = 0.20
### Load data by serializing files and applying decode function.
def decode(serialized_example):
"""Decode data from TFRecord files.
Args:
serialized_example: serialized_example as created by
tf.data.TFRecordDataset
Returns:
(tuple): Tuple containing:
- **audio** (numpy.ndarray): Array of shape (10,128) representing one
sample with 10 timesteps and 128 features
- **label** (numpy.ndarray): Array of shape (1,) containing the class
of the corresponding sample
"""
sequence_features = {
'audio_embedding': tf.FixedLenSequenceFeature([], tf.string),
}
context_features = {
'start_time_seconds': tf.FixedLenFeature([], tf.float32),
'labels': tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized_example,
sequence_features=sequence_features,
context_features=context_features
)
audio = tf.decode_raw(sequence_parsed['audio_embedding'], tf.uint8)
label = tf.cast(context_parsed['labels'], tf.int64)
return audio, label
# Apply decode function to all dataset entries using map function.
# Take files from all three data sets since we repartition anyway.
fpaths = []
for path, subdirs, files in os.walk(download_dir):
for name in files:
if 'tfrecord' in name:
fpaths.append(os.path.join(path, name))
# Create dataset and decode
dataset = tf.data.TFRecordDataset(fpaths)
dataset = dataset.map(decode)
# Extract data to lists
x = []
y = []
for d in dataset:
x.append(d[0].numpy())
y.append(tf.sparse.to_dense(tf.sparse.reorder(d[1])).numpy())
### Filter classes as described above.
# Parse confidence values
conf_data = {}
with open(fpath_conf_data) as f:
for line in f:
tokens = line.split()
# parse confidence
c = 0
for t in tokens:
            if t.find('%') != -1:
c = int(t[:-1])
# parse class name
n = ''
for t in tokens:
if t.find('%') == -1 and t != '-':
if n == '':
n = t
else:
n = n+' '+t
else:
break
conf_data.update({n:c})
# Parse class numbers from label csv file
l = -1
csv_data = {}
with open(fpath_label_inds) as f:
for line in f:
if l == -1:
l += 1
continue
tokens = line.split('"')
n = tokens[1]
csv_data.update({n:l})
l +=1
# Parse ontology info from json file
with open(fpath_ontology, 'r') as f:
json_data = json.load(f)
# Put all data into a single list.
all_data = []
for j in json_data:
if j['name'] in conf_data.keys():
class_info = {
'name' : j['name'],
'restricted' : j['restrictions'] != [],
'has_child' : j['child_ids'] != [],
'conf' : conf_data[j['name']],
'id' : csv_data[j['name']]
}
all_data.append(class_info)
# Filter classes
classes = []
for c in all_data:
if not c['restricted'] and not c['has_child'] and c['conf'] >= 70:
classes.append(c['id'])
### Filter the samples.
# Find samples that belong to only one of the potential classes.
# We also exclude some samples that don't have data for the full 10 seconds.
# First discard labels that are not in the set of potential classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],classes))
# Find samples with one label
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Count number of valid samples for potential classes
y_single = np.asarray([y_fil[i][0] for i in valid_idx])
num_samples = [len(np.where(y_single == i)[0]) for i in classes]
# Take the n classes with the highest number of samples
n_sample_cutoff = np.sort(num_samples)[-n_classes]
class_idx = np.where(np.asarray(num_samples) >= n_sample_cutoff)[0]
our_classes = [classes[i] for i in class_idx]
### Filter the data again according to the chosen classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],our_classes))
# Find samples that belong to only one of the potential classes
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Restructure data and relabel the classes to be between 0 and n_classes
y_data = [y_fil[i][0] for i in valid_idx]
y_data = [np.where(np.asarray(our_classes) == i)[0][0] for i in y_data]
y_data = np.asarray(y_data)
x_data = [x[i] for i in valid_idx]
x_data = np.stack(x_data)
### Split into test and train and restrict the number of samples per class
np.random.seed(42)
n_train = int(n_sample * (1-test_frac))
n_test = int(n_sample * test_frac)
train_ind = []
test_ind = []
for i in range(n_classes):
sample_idx = np.where(y_data == i)[0]
n_sample_class = len(sample_idx)
rand_idx = np.arange(n_sample_class)
np.random.shuffle(rand_idx)
train_ind.extend(sample_idx[rand_idx[0:n_train]])
test_ind.extend(sample_idx[rand_idx[n_train:n_sample]])
train_ind = np.asarray(train_ind)
test_ind = np.asarray(test_ind)
sub_sample_idx = np.hstack((train_ind,test_ind))
x_data_sub = x_data[sub_sample_idx,:,:]
y_data_sub = y_data[sub_sample_idx]
train_ind = np.arange(0,len(train_ind))
test_ind = np.arange(len(train_ind),len(train_ind)+len(test_ind))
### Save data
with open(target_path, 'wb') as f:
pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f)
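# --- Illustrative loading sketch (editor addition; not part of the original script) ---
# A minimal example of how the pickle written above could be read back by downstream
# code. It only reuses names defined in this script (pickle, target_path); the helper
# name `_load_preprocessed_audioset` is illustrative.
def _load_preprocessed_audioset(path=target_path):
    """Return (x, y, train_idx, test_idx) exactly as saved by this script."""
    with open(path, 'rb') as fh:
        x_all, y_all, train_idx, test_idx = pickle.load(fh)
    # x_all: (n_samples, 10, 128) uint8 embeddings; y_all: (n_samples,) labels in [0, n_classes)
    return x_all, y_all, train_idx, test_idx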
| <filename>hypnettorch/data/timeseries/preprocess_audioset.py
#!/usr/bin/env python3
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# title :data/timeseries/preprocess_audioset.py
# author :be
# contact :<EMAIL>
# created :31/03/2020
# version :1.0
# python_version :3.7
"""
Script to structure the audioset dataset, which can then be used via
:class:`data.timeseries.audioset_data.AudiosetData`.
The result of this script is available at
https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0
If you want to recreate or modify this dataset, download the audioset data from
https://research.google.com/audioset/download.html
and extract the tar.gz into the following folder:
``datasets/sequential/audioset/audioset_download``.
Subsequently executing this script will create a pickle file containing the 100
class subset of audioset used in this study.
The dataset is stored in tensorflow files. Since we work with pytorch and there
is no utility to read tensorflow files, we extract the data and save them as
numpy arrays in a pickle file.
Furthermore the data are preprocessed to fit our continual learning experiments.
The original dataset provides three subsets with different compositions of
samples and classes. Since we only work with a subset of classes and samples,
we load all available data and then filter and structure them according to our
criteria.
We use the same criteria as Kemker et al. Classes and samples are restricted in
the following way:
Classes:
- no restriction according to ontology file (parsed from ontology.json)
- no parent / child relationship (parsed from ontology.json)
- confidence level > 70% (data was copied from website into txt file)
- number of samples: we only take classes that have more samples than
a certain threshold
Samples:
- since samples can have multiple labels, we only use samples which
only belong to one of the classes we use
- we exclude samples that don't have the full length of 10 seconds
The chosen classes and samples are then split into train and test data and
saved to a pickle file.
"""
import numpy as np
import pickle
import tensorflow as tf
import os
import json
from warnings import warn
warn('The script was created for one time usage and has to be adapted when ' +
'reusing it. All paths specified here are absolute.')
# Tensorflow eager mode needs to be enabled for dataset mapping to work!
tf.enable_eager_execution()
# Set paths and parameters
data_dir = '../../datasets/sequential/audioset/'
download_dir = os.path.join(data_dir,'audioset_download')
fpath_conf_data = os.path.join(data_dir, 'confidence_data.csv')
fpath_label_inds = os.path.join(data_dir, 'class_labels_indices.csv')
fpath_ontology = os.path.join(data_dir, 'ontology.json')
target_path = os.path.join(data_dir, 'audioset_data_balanced.pickle')
n_classes = 100
n_sample = 1000
test_frac = 0.20
### Load data by serializing files and applying decode function.
def decode(serialized_example):
"""Decode data from TFRecord files.
Args:
serialized_example: serialized_example as created by
tf.data.TFRecordDataset
Returns:
(tuple): Tuple containing:
- **audio** (numpy.ndarray): Array of shape (10,128) representing one
sample with 10 timesteps and 128 features
- **label** (numpy.ndarray): Array of shape (1,) containing the class
of the corresponding sample
"""
sequence_features = {
'audio_embedding': tf.FixedLenSequenceFeature([], tf.string),
}
context_features = {
'start_time_seconds': tf.FixedLenFeature([], tf.float32),
'labels': tf.VarLenFeature(dtype=tf.int64),
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized_example,
sequence_features=sequence_features,
context_features=context_features
)
audio = tf.decode_raw(sequence_parsed['audio_embedding'], tf.uint8)
label = tf.cast(context_parsed['labels'], tf.int64)
return audio, label
# Apply decode function to all dataset entries using map function.
# Take files from all three data sets since we repartition anyway.
fpaths = []
for path, subdirs, files in os.walk(download_dir):
for name in files:
if 'tfrecord' in name:
fpaths.append(os.path.join(path, name))
# Create dataset and decode
dataset = tf.data.TFRecordDataset(fpaths)
dataset = dataset.map(decode)
# Extract data to lists
x = []
y = []
for d in dataset:
x.append(d[0].numpy())
y.append(tf.sparse.to_dense(tf.sparse.reorder(d[1])).numpy())
### Filter classes as described above.
# Parse confidence values
conf_data = {}
with open(fpath_conf_data) as f:
for line in f:
tokens = line.split()
# parse confidence
c = 0
for t in tokens:
            if t.find('%') != -1:
c = int(t[:-1])
# parse class name
n = ''
for t in tokens:
if t.find('%') == -1 and t != '-':
if n == '':
n = t
else:
n = n+' '+t
else:
break
conf_data.update({n:c})
# Parse class numbers from label csv file
l = -1
csv_data = {}
with open(fpath_label_inds) as f:
for line in f:
if l == -1:
l += 1
continue
tokens = line.split('"')
n = tokens[1]
csv_data.update({n:l})
l +=1
# Parse ontology info from json file
with open(fpath_ontology, 'r') as f:
json_data = json.load(f)
# Put all data into a single list.
all_data = []
for j in json_data:
if j['name'] in conf_data.keys():
class_info = {
'name' : j['name'],
'restricted' : j['restrictions'] != [],
'has_child' : j['child_ids'] != [],
'conf' : conf_data[j['name']],
'id' : csv_data[j['name']]
}
all_data.append(class_info)
# Filter classes
classes = []
for c in all_data:
if not c['restricted'] and not c['has_child'] and c['conf'] >= 70:
classes.append(c['id'])
### Filter the samples.
# Find samples that belong to only one of the potential classes.
# We also exclude some samples that don't have data for the full 10 seconds.
# First discard labels that are not in the set of potential classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],classes))
# Find samples with one label
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Count number of valid samples for potential classes
y_single = np.asarray([y_fil[i][0] for i in valid_idx])
num_samples = [len(np.where(y_single == i)[0]) for i in classes]
# Take the n classes with the highest number of samples
n_sample_cutoff = np.sort(num_samples)[-n_classes]
class_idx = np.where(np.asarray(num_samples) >= n_sample_cutoff)[0]
our_classes = [classes[i] for i in class_idx]
### Filter the data again according to the chosen classes
y_fil = []
for i in range(len(y)):
y_fil.append( np.intersect1d(y[i],our_classes))
# Find samples that belong to only one of the potential classes
n_labels = np.asarray([len(y) for y in y_fil])
single_label_idx = np.where(n_labels == 1)[0]
# Find samples that are shorter than 10 seconds (to be excluded)
too_short = np.where(np.asarray([x.shape[0] for x in x]) != 10)[0]
# Construct the set of valid samples
valid_idx = np.setdiff1d(single_label_idx,too_short)
# Restructure data and relabel the classes to be between 0 and n_classes
y_data = [y_fil[i][0] for i in valid_idx]
y_data = [np.where(np.asarray(our_classes) == i)[0][0] for i in y_data]
y_data = np.asarray(y_data)
x_data = [x[i] for i in valid_idx]
x_data = np.stack(x_data)
### Split into test and train and restrict the number of samples per class
np.random.seed(42)
n_train = int(n_sample * (1-test_frac))
n_test = int(n_sample * test_frac)
train_ind = []
test_ind = []
for i in range(n_classes):
sample_idx = np.where(y_data == i)[0]
n_sample_class = len(sample_idx)
rand_idx = np.arange(n_sample_class)
np.random.shuffle(rand_idx)
train_ind.extend(sample_idx[rand_idx[0:n_train]])
test_ind.extend(sample_idx[rand_idx[n_train:n_sample]])
train_ind = np.asarray(train_ind)
test_ind = np.asarray(test_ind)
sub_sample_idx = np.hstack((train_ind,test_ind))
x_data_sub = x_data[sub_sample_idx,:,:]
y_data_sub = y_data[sub_sample_idx]
train_ind = np.arange(0,len(train_ind))
test_ind = np.arange(len(train_ind),len(train_ind)+len(test_ind))
### Save data
with open(target_path, 'wb') as f:
pickle.dump([x_data_sub, y_data_sub, train_ind, test_ind], f)
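# --- Illustrative loading sketch (editor addition; not part of the original script) ---
# A minimal example of how the pickle written above could be read back by downstream
# code. It only reuses names defined in this script (pickle, target_path); the helper
# name `_load_preprocessed_audioset` is illustrative.
def _load_preprocessed_audioset(path=target_path):
    """Return (x, y, train_idx, test_idx) exactly as saved by this script."""
    with open(path, 'rb') as fh:
        x_all, y_all, train_idx, test_idx = pickle.load(fh)
    # x_all: (n_samples, 10, 128) uint8 embeddings; y_all: (n_samples,) labels in [0, n_classes)
    return x_all, y_all, train_idx, test_idx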
| en | 0.846465 | #!/usr/bin/env python3 # Copyright 2020 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # title :data/timeseries/preprocess_audioset.py # author :be # contact :<EMAIL> # created :31/03/2020 # version :1.0 # python_version :3.7 Script to structure the audioset dataset, which can then be used via :class:`data.timeseries.audioset_data.AudiosetData`. The result of this script is available at https://www.dropbox.com/s/07dfeeuf5aq4w1h/audioset_data_balanced?dl=0 If you want to recreate or modify this dataset, download the audioset data from https://research.google.com/audioset/download.html and extract the tar.gz into the following folder: ``datasets/sequential/audioset/audioset_download``. Subsequently executing this script will create a pickle file containing the 100 class subset of audioset used in this study. The dataset is stored in tensorflow files. Since we work with pytorch and there is no utility to read tensorflow files, we extract the data and safe them as numpy arrays in a pickle file. Furthermore the data are preprocessed to fit our continual learning experiments. The original dataset provides three subsets with different compositions of samples and classes. Since we only work with a subset of classes and samples, we load all available data and then filter and structure them according to our criteria. We use the same criteria as Kemker et al. Classes and samples are restricted in the following way: Classes: - no restriction according to ontology file (parsed from ontology.json) - no parent / child relationship (parsed from ontology.json) - confidence level > 70% (data was copied from website into txt file) - number of samples: we only take classes that have more samples than a certain threshold Samples: - since samples can have multiple labels, we only use samples which only belong to one of the classes we use - we exclude samples that don't have the full length of 10 seconds The chosen classes and samples are then split into train and test data and saved to a pickle file. # Tensorflow eager mode needs to be enabled for dataset mapping to work! # Set paths and parameters ### Load data by serializing files and applying decode function. Decode data from TFRecord files. Args: serialized_example: serialized_example as created by tf.data.TFRecordDataset Returns: (tuple): Tuple containing: - **audio** (numpy.ndarray): Array of shape (10,128) representing one sample with 10 timesteps and 128 features - **label** (numpy.ndarray): Array of shape (1,) containing the class of the corresponding sample # Apply decode function to all dataset entries using map function. # Take files from all three data sets since we repartition anyway. # Create dataset and decode # Extract data to lists ### Filter classes as described above. # Parse confidence values # parse confidence # parse class name # Parse class numbers from label csv file # Parse ontology info from json file # Put all data into a single list. # Filter classes ### Filter the samples. 
# Find samples that belong to only one of the potential classes. # We also exclude some samples that don't have data for the full 10 seconds. # First discard labels that are not in the set of potential classes # Find samples with one label # Find samples that are shorter than 10 seconds (to be excluded) # Construct the set of valid samples # Count number of valid samples for potential classes # Take the n classes with the highest number of samples ### Filter the data again according the the chosen classes # Find samples that belong to only one of the potential classes # Find samples that dont are shorter than 10 seconds # Construct the set of valid samples # Restructure data and relabel the classes to be between 0 and n_classes ### Split into test and train and restrict the number of samples per class ### Save data | 2.231249 | 2 |
Posts/viewsAPI.py | CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution | 3 | 483 | from django.conf import settings
from django.core import serializers
from django.utils import timezone
import requests
from Posts.commentModel import Comments
#from Posts.commentView import add_Comment
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from django.shortcuts import HttpResponse, render
from requests import get
from .serializers import CommentSerializer, PostSerializer
from Author.serializers import LikeSerializer
from Author.models import Like
from Author.views import updateForeignAuthors, GetForeignAuthors
from .models import Post, Author
from .form import PostForm
from Posts.commentForm import CommentForm
import json
import uuid
import re
import base64
from django.db.models import Q
import django.core
from permissions import CustomAuthentication, AccessPermission
from django.core.paginator import Paginator
import traceback
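# Create a Post from a validated PostForm. Binary content types are read from the
# uploaded file (base64-encoded for images); otherwise the form's text field is used.
# Returns True on success, False if the form fails validation.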
def newPost(request, uid=None, auth_pk=None):
form = PostForm(request.POST, request.FILES)
if form.is_valid():
title = form.cleaned_data['title']
        description = form.cleaned_data['description']
categories = form.cleaned_data['categories'].split(' ')
visibility = form.cleaned_data['visibility']
unlisted = form.cleaned_data['unlisted']
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
source = settings.SERVER_URL + "/"
origin = settings.SERVER_URL + "/"
author_id = Author.objects.get(pk=auth_pk)
id = author_id.url
author = json.loads(serializers.serialize('json', Author.objects.filter(pk=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
id = id + '/posts/' + uid + "/"
comments_id = id + "comments/"
published = timezone.now()
        posts = Post(pk=uid, id=id, author_id=author_id, author=author, title=title, source=source, origin=origin, description=description, contentType=contentType, count=0, size=10, categories=categories, visibility=visibility, unlisted=unlisted, published=published, content=content, comments=comments_id)
posts.save()
return True
else:
print(request.data)
print(form.errors)
print(form.data)
return False
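# Create a Comment on the given post from a validated CommentForm, mirroring the
# content handling in newPost. Returns True on success, False otherwise.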
def add_Comment(request, post_pk, auth_pk, uid=None):
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
updateForeignAuthors()
published = timezone.now()
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
author_id = json.loads(serializers.serialize('json', Author.objects.filter(email=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
post = Post.objects.get(pk = post_pk)
post_pk_str = post_pk
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
comment_id = getattr(post, 'comments') + uid
comments = Comments(pk=uid, id=comment_id, Post_pk=post, Post_pk_str = post_pk_str, auth_pk_str = auth_pk, author=author_id, size=10, published=published, contentType=contentType, content=content)
comments.save()
return True
else:
print(request.data)
return False
@api_view(['GET',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostLikesView(request, post_pk, auth_pk):
post = Post.objects.get(post_pk = post_pk)
author = Author.objects.get(pk = auth_pk)
likeObjs = Like.objects.filter(~Q(auth_pk = author), object = post.id)
Likes = LikeSerializer(likeObjs, read_only=True, many=True)
likes = []
for l in Likes.data:
like = {}
for key in l:
if(key != "context"):
like[key] = l[key]
like["@context"] = l["context"]
like["author"] = json.loads(django.core.serializers.serialize('json', Author.objects.filter(id=l["author"]), fields=('type', 'id', 'displayName', 'host', 'url', 'github',)))[0]['fields']
likes.append(like)
response_dict = {
"type": "likes",
"items": likes
}
return Response(response_dict)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostsList(request, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
if auth_pk:
try:
author = Author.objects.get(auth_pk=auth_pk)
posts = Post.objects.filter(author_id=author, id__icontains = "linkedspace")
code = status.HTTP_200_OK
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
except Exception as e:
print(e)
data = {}
code = status.HTTP_400_BAD_REQUEST
else:
code = status.HTTP_200_OK
posts = Post.objects.filter(id__icontains = "linkedspace")
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
elif request.method == 'POST':
if newPost(request, auth_pk=request.data['auth_pk']):
code = status.HTTP_201_CREATED
post = Post.objects.latest("published")
data = PostSerializer(post).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentListView(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
comments = Comments.objects.filter(Post_pk_str=post_pk)
post = Post.objects.get(pk=post_pk)
post_id = getattr(post, 'id')
comment_id = getattr(post, 'comments')
paginator = Paginator(comments, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
response_dict = {
"type": "comments",
"page": page_number,
"size": page_size,
"post": post_id,
"id": comment_id,
"comments": serializer.data,
}
return Response(response_dict)
elif request.method == 'POST':
if add_Comment(request, post_pk=request.data['Post_pk'], auth_pk=request.data['auth_pk']):
code = status.HTTP_202_ACCEPTED
comment = Comments.objects.latest("published")
data = CommentSerializer(comment).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST', 'PUT', 'DELETE', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostDetail(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
if 'title' in request.data.keys():
post.title = request.data['title']
if 'description' in request.data.keys():
post.description = request.data['description']
if 'categories' in request.data.keys():
post.categories = request.data['categories'].split(' ')
if 'visibility' in request.data.keys():
post.visibility = request.data['visibility']
if 'unlisted' in request.data.keys():
post.unlisted = request.data['unlisted']
if 'contentType' in request.data.keys():
post.contentType = request.data['contentType']
if post.contentType == "application/app":
post.content = request.FILES['file'].read() #Inputfile
elif post.contentType in ["image/png", "image/jpeg",]:
post.content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
post.content = request.data["text"]
post.save()
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'PUT':
try:
code = status.HTTP_201_CREATED
assert newPost(request, post_pk, request.data['auth_pk'])==True
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'DELETE':
try:
post = Post.objects.get(post_pk=post_pk)
post.delete()
code = status.HTTP_200_OK
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
@api_view(['GET', 'POST', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentDetail(request, post_pk, comment_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
if 'contentType' in request.data.keys():
comment.contentType = request.data['contentType']
if 'text' in request.data.keys():
comment.content = request.data['text']
comment.save()
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
@api_view(['GET',])
def connection(request, auth_id=None):
data = []
team3 = get('https://social-dis.herokuapp.com/posts', auth=('socialdistribution_t03','c404t03'))
if team3.status_code == 200:
data.append(team3.json())
team15 = get('https://unhindled.herokuapp.com/service/allposts/', auth=('connectionsuperuser','404connection'))
if team15.status_code == 200:
data.append(team15.json())
team17 = get('https://cmput404f21t17.herokuapp.com/service/connect/public/', auth=('4cbe2def-feaa-4bb7-bce5-<PASSWORD>','123456'))
if team17.status_code == 200:
data.append(team17.json())
return Response({'connection': data})
| from django.conf import settings
from django.core import serializers
from django.utils import timezone
import requests
from Posts.commentModel import Comments
#from Posts.commentView import add_Comment
from rest_framework import status
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from django.shortcuts import HttpResponse, render
from requests import get
from .serializers import CommentSerializer, PostSerializer
from Author.serializers import LikeSerializer
from Author.models import Like
from Author.views import updateForeignAuthors, GetForeignAuthors
from .models import Post, Author
from .form import PostForm
from Posts.commentForm import CommentForm
import json
import uuid
import re
import base64
from django.db.models import Q
import django.core
from permissions import CustomAuthentication, AccessPermission
from django.core.paginator import Paginator
import traceback
def newPost(request, uid=None, auth_pk=None):
form = PostForm(request.POST, request.FILES)
if form.is_valid():
title = form.cleaned_data['title']
        description = form.cleaned_data['description']
categories = form.cleaned_data['categories'].split(' ')
visibility = form.cleaned_data['visibility']
unlisted = form.cleaned_data['unlisted']
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
source = settings.SERVER_URL + "/"
origin = settings.SERVER_URL + "/"
author_id = Author.objects.get(pk=auth_pk)
id = author_id.url
author = json.loads(serializers.serialize('json', Author.objects.filter(pk=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
id = id + '/posts/' + uid + "/"
comments_id = id + "comments/"
published = timezone.now()
        posts = Post(pk=uid, id=id, author_id=author_id, author=author, title=title, source=source, origin=origin, description=description, contentType=contentType, count=0, size=10, categories=categories, visibility=visibility, unlisted=unlisted, published=published, content=content, comments=comments_id)
posts.save()
return True
else:
print(request.data)
print(form.errors)
print(form.data)
return False
def add_Comment(request, post_pk, auth_pk, uid=None):
form = CommentForm(request.POST, request.FILES)
if form.is_valid():
updateForeignAuthors()
published = timezone.now()
contentType = form.cleaned_data['contentType']
if contentType == "application/app":
content = request.FILES['file'].read() #Inputfile
elif contentType in ["image/png", "image/jpeg",]:
content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
content = form.cleaned_data["text"]
author_id = json.loads(serializers.serialize('json', Author.objects.filter(email=auth_pk), fields=('type', 'id', 'host', 'displayName', 'url', 'github',)))[0]['fields']
post = Post.objects.get(pk = post_pk)
post_pk_str = post_pk
if uid == None:
r_uid = uuid.uuid4().hex
uid = re.sub('-', '', r_uid)
comment_id = getattr(post, 'comments') + uid
comments = Comments(pk=uid, id=comment_id, Post_pk=post, Post_pk_str = post_pk_str, auth_pk_str = auth_pk, author=author_id, size=10, published=published, contentType=contentType, content=content)
comments.save()
return True
else:
print(request.data)
return False
@api_view(['GET',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostLikesView(request, post_pk, auth_pk):
post = Post.objects.get(post_pk = post_pk)
author = Author.objects.get(pk = auth_pk)
likeObjs = Like.objects.filter(~Q(auth_pk = author), object = post.id)
Likes = LikeSerializer(likeObjs, read_only=True, many=True)
likes = []
for l in Likes.data:
like = {}
for key in l:
if(key != "context"):
like[key] = l[key]
like["@context"] = l["context"]
like["author"] = json.loads(django.core.serializers.serialize('json', Author.objects.filter(id=l["author"]), fields=('type', 'id', 'displayName', 'host', 'url', 'github',)))[0]['fields']
likes.append(like)
response_dict = {
"type": "likes",
"items": likes
}
return Response(response_dict)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostsList(request, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
if auth_pk:
try:
author = Author.objects.get(auth_pk=auth_pk)
posts = Post.objects.filter(author_id=author, id__icontains = "linkedspace")
code = status.HTTP_200_OK
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
except Exception as e:
print(e)
data = {}
code = status.HTTP_400_BAD_REQUEST
else:
code = status.HTTP_200_OK
posts = Post.objects.filter(id__icontains = "linkedspace")
paginator = Paginator(posts, page_size)
page_obj = paginator.get_page(page_number)
data = PostSerializer(page_obj.object_list, many=True).data
elif request.method == 'POST':
if newPost(request, auth_pk=request.data['auth_pk']):
code = status.HTTP_201_CREATED
post = Post.objects.latest("published")
data = PostSerializer(post).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentListView(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
comments = Comments.objects.filter(Post_pk_str=post_pk)
post = Post.objects.get(pk=post_pk)
post_id = getattr(post, 'id')
comment_id = getattr(post, 'comments')
paginator = Paginator(comments, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
response_dict = {
"type": "comments",
"page": page_number,
"size": page_size,
"post": post_id,
"id": comment_id,
"comments": serializer.data,
}
return Response(response_dict)
elif request.method == 'POST':
if add_Comment(request, post_pk=request.data['Post_pk'], auth_pk=request.data['auth_pk']):
code = status.HTTP_202_ACCEPTED
comment = Comments.objects.latest("published")
data = CommentSerializer(comment).data
else:
code = status.HTTP_400_BAD_REQUEST
data = {}
return Response(data, code)
@api_view(['GET', 'POST', 'PUT', 'DELETE', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def PostDetail(request, post_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
post = Post.objects.get(post_pk=post_pk)
if 'title' in request.data.keys():
post.title = request.data['title']
if 'description' in request.data.keys():
post.description = request.data['description']
if 'categories' in request.data.keys():
post.categories = request.data['categories'].split(' ')
if 'visibility' in request.data.keys():
post.visibility = request.data['visibility']
if 'unlisted' in request.data.keys():
post.unlisted = request.data['unlisted']
if 'contentType' in request.data.keys():
post.contentType = request.data['contentType']
if post.contentType == "application/app":
post.content = request.FILES['file'].read() #Inputfile
elif post.contentType in ["image/png", "image/jpeg",]:
post.content = base64.b64encode(request.FILES['file'].read()) #Inputfile
else:
post.content = request.data["text"]
post.save()
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'PUT':
try:
code = status.HTTP_201_CREATED
assert newPost(request, post_pk, request.data['auth_pk'])==True
post = Post.objects.get(post_pk=post_pk)
serializer = PostSerializer(post)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
elif request.method == 'DELETE':
try:
post = Post.objects.get(post_pk=post_pk)
post.delete()
code = status.HTTP_200_OK
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
post = Post.objects.all()
paginator = Paginator(post, page_size)
page_obj = paginator.get_page(page_number)
serializer = PostSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
@api_view(['GET', 'POST', ])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def commentDetail(request, post_pk, comment_pk, auth_pk=None):
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
if request.method == 'GET':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_404_NOT_FOUND
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
elif request.method == 'POST':
try:
code = status.HTTP_200_OK
comment = Comments.objects.get(pk=comment_pk)
if 'contentType' in request.data.keys():
comment.contentType = request.data['contentType']
if 'text' in request.data.keys():
comment.content = request.data['text']
comment.save()
serializer = CommentSerializer(comment)
except Exception as e:
print(e)
code = status.HTTP_400_BAD_REQUEST
comment = Comments.objects.all()
paginator = Paginator(comment, page_size)
page_obj = paginator.get_page(page_number)
serializer = CommentSerializer(page_obj.object_list, many=True)
return Response(serializer.data, code)
@api_view(['GET',])
def connection(request, auth_id=None):
data = []
team3 = get('https://social-dis.herokuapp.com/posts', auth=('socialdistribution_t03','c404t03'))
if team3.status_code == 200:
data.append(team3.json())
team15 = get('https://unhindled.herokuapp.com/service/allposts/', auth=('connectionsuperuser','404connection'))
if team15.status_code == 200:
data.append(team15.json())
team17 = get('https://cmput404f21t17.herokuapp.com/service/connect/public/', auth=('4cbe2def-feaa-4bb7-bce5-<PASSWORD>','123456'))
if team17.status_code == 200:
data.append(team17.json())
return Response({'connection': data})
| en | 0.390649 | #from Posts.commentView import add_Comment #Inputfile #Inputfile #Inputfile #Inputfile #Inputfile #Inputfile | 1.965041 | 2 |
workers/tests/test_array_element.py | Open-EO/openeo-sentinelhub-python-driver | 2 | 484 | <reponame>Open-EO/openeo-sentinelhub-python-driver
import pytest
import sys, os
import xarray as xr
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import process
from process._common import ProcessArgumentInvalid, ProcessArgumentRequired
@pytest.fixture
def generate_data():
def _construct(
data = [[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]],
dims = ('t','y','x','band'),
reduce_by = "band",
as_list = False
):
if as_list:
return data
xrdata = xr.DataArray(
data,
dims=dims,
attrs={'reduce_by': [reduce_by]},
)
return xrdata
return _construct
@pytest.fixture
def execute_array_element_process(generate_data):
def wrapped(data_arguments={}, index=None, return_nodata=None):
arguments = {}
if data_arguments is not None: arguments["data"] = generate_data(**data_arguments)
if index is not None: arguments["index"] = index
if return_nodata is not None: arguments["return_nodata"] = return_nodata
return process.array_element.array_elementEOTask(None, "" , None, {}, "arrayel1").process(arguments)
return wrapped
###################################
# tests:
###################################
@pytest.mark.parametrize('data,return_nodata,index,expected_result', [
([9,8,7,6,5], None, 2, 7),
(["A","B","C"], None, 0, "A"),
([], True, 0, None)
])
def test_examples(execute_array_element_process, data, index, return_nodata, expected_result):
"""
Test array_element process with examples from https://open-eo.github.io/openeo-api/processreference/#array_element
"""
data_arguments = {"data": data, "as_list": True}
result = execute_array_element_process(data_arguments=data_arguments, index=index, return_nodata=return_nodata)
assert result == expected_result
@pytest.mark.parametrize('data,index,reduce_by,expected_data,expected_dims', [
([[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]], 0, "band", [[[0.1, 0.15], [0.05, -0.9]]], ('t','y','x')),
([[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]], 1, "y", [[[0.05, 0.1], [-0.9, 0.05]]], ('t','x','band')),
])
def test_with_xarray(execute_array_element_process, generate_data, data, index, reduce_by, expected_data, expected_dims):
"""
Test array_element process with xarray.DataArrays
"""
expected_result = generate_data(data=expected_data, dims=expected_dims, reduce_by=reduce_by)
result = execute_array_element_process(data_arguments={"data": data, "reduce_by": reduce_by}, index=index)
xr.testing.assert_allclose(result, expected_result)
def test_with_xarray_out_bounds(execute_array_element_process, generate_data):
"""
Test array_element process with xarray.DataArrays with out of bounds index
"""
with pytest.raises(ProcessArgumentInvalid) as ex:
result = execute_array_element_process(index=5)
assert ex.value.args[0] == "The argument 'index' in process 'array_element' is invalid: Index out of bounds."
@pytest.mark.parametrize('data_arguments,index,expected_data,expected_dims', [
({}, 5, [[[np.nan, np.nan], [np.nan, np.nan]]], ('t','y','x')),
])
def test_with_xarray_out_bounds_return_nodata(execute_array_element_process, generate_data, data_arguments, index, expected_data, expected_dims):
"""
Test array_element process with xarray.DataArrays with out of bounds index and return_no_data
"""
expected_result = generate_data(expected_data, dims=expected_dims)
result = execute_array_element_process(data_arguments=data_arguments, index=index, return_nodata=True)
xr.testing.assert_equal(result, expected_result)
| import pytest
import sys, os
import xarray as xr
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import process
from process._common import ProcessArgumentInvalid, ProcessArgumentRequired
@pytest.fixture
def generate_data():
def _construct(
data = [[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]],
dims = ('t','y','x','band'),
reduce_by = "band",
as_list = False
):
if as_list:
return data
xrdata = xr.DataArray(
data,
dims=dims,
attrs={'reduce_by': [reduce_by]},
)
return xrdata
return _construct
@pytest.fixture
def execute_array_element_process(generate_data):
def wrapped(data_arguments={}, index=None, return_nodata=None):
arguments = {}
if data_arguments is not None: arguments["data"] = generate_data(**data_arguments)
if index is not None: arguments["index"] = index
if return_nodata is not None: arguments["return_nodata"] = return_nodata
return process.array_element.array_elementEOTask(None, "" , None, {}, "arrayel1").process(arguments)
return wrapped
###################################
# tests:
###################################
@pytest.mark.parametrize('data,return_nodata,index,expected_result', [
([9,8,7,6,5], None, 2, 7),
(["A","B","C"], None, 0, "A"),
([], True, 0, None)
])
def test_examples(execute_array_element_process, data, index, return_nodata, expected_result):
"""
Test array_element process with examples from https://open-eo.github.io/openeo-api/processreference/#array_element
"""
data_arguments = {"data": data, "as_list": True}
result = execute_array_element_process(data_arguments=data_arguments, index=index, return_nodata=return_nodata)
assert result == expected_result
@pytest.mark.parametrize('data,index,reduce_by,expected_data,expected_dims', [
([[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]], 0, "band", [[[0.1, 0.15], [0.05, -0.9]]], ('t','y','x')),
([[[[0.1, 0.15], [0.15, 0.2]], [[0.05, 0.1], [-0.9, 0.05]]]], 1, "y", [[[0.05, 0.1], [-0.9, 0.05]]], ('t','x','band')),
])
def test_with_xarray(execute_array_element_process, generate_data, data, index, reduce_by, expected_data, expected_dims):
"""
Test array_element process with xarray.DataArrays
"""
expected_result = generate_data(data=expected_data, dims=expected_dims, reduce_by=reduce_by)
result = execute_array_element_process(data_arguments={"data": data, "reduce_by": reduce_by}, index=index)
xr.testing.assert_allclose(result, expected_result)
def test_with_xarray_out_bounds(execute_array_element_process, generate_data):
"""
Test array_element process with xarray.DataArrays with out of bounds index
"""
with pytest.raises(ProcessArgumentInvalid) as ex:
result = execute_array_element_process(index=5)
assert ex.value.args[0] == "The argument 'index' in process 'array_element' is invalid: Index out of bounds."
@pytest.mark.parametrize('data_arguments,index,expected_data,expected_dims', [
({}, 5, [[[np.nan, np.nan], [np.nan, np.nan]]], ('t','y','x')),
])
def test_with_xarray_out_bounds_return_nodata(execute_array_element_process, generate_data, data_arguments, index, expected_data, expected_dims):
"""
Test array_element process with xarray.DataArrays with out of bounds index and return_no_data
"""
expected_result = generate_data(expected_data, dims=expected_dims)
result = execute_array_element_process(data_arguments=data_arguments, index=index, return_nodata=True)
xr.testing.assert_equal(result, expected_result) | en | 0.436951 | ################################### # tests: ################################### Test array_element process with examples from https://open-eo.github.io/openeo-api/processreference/#array_element Test array_element process with xarray.DataArrays Test array_element process with xarray.DataArrays with out of bounds index Test array_element process with xarray.DataArrays with out of bounds index and return_no_data | 2.15261 | 2 |
gn/gn_to_bp.py | despairblue/esy-skia | 2,151 | 485 | <reponame>despairblue/esy-skia
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Generate Android.bp for Skia from GN configuration.
import json
import os
import pprint
import string
import subprocess
import tempfile
import gn_to_bp_utils
# First we start off with a template for Android.bp,
# with holes for source lists and include directories.
bp = string.Template('''// This file is autogenerated by gn_to_bp.py.
cc_library_static {
name: "libskia",
cflags: [
$cflags
],
cppflags:[
$cflags_cc
],
export_include_dirs: [
$export_includes
],
local_include_dirs: [
$local_includes
],
srcs: [
$srcs
],
arch: {
arm: {
srcs: [
$arm_srcs
],
neon: {
srcs: [
$arm_neon_srcs
],
},
},
arm64: {
srcs: [
$arm64_srcs
],
},
mips: {
srcs: [
$none_srcs
],
},
mips64: {
srcs: [
$none_srcs
],
},
x86: {
srcs: [
$x86_srcs
],
cflags: [
// Clang seems to think new/malloc will only be 4-byte aligned
// on x86 Android. We're pretty sure it's actually 8-byte
// alignment. tests/OverAlignedTest.cpp has more information,
// and should fail if we're wrong.
"-Wno-over-aligned"
],
},
x86_64: {
srcs: [
$x86_srcs
],
},
},
defaults: ["skia_deps",
"skia_pgo",
],
}
// Build libskia with PGO by default.
// Location of PGO profile data is defined in build/soong/cc/pgo.go
// and is separate from skia.
// To turn it off, set ANDROID_PGO_NO_PROFILE_USE environment variable
// or set enable_profile_use property to false.
cc_defaults {
name: "skia_pgo",
pgo: {
instrumentation: true,
profile_file: "hwui/hwui.profdata",
benchmarks: ["hwui", "skia"],
enable_profile_use: true,
},
}
// "defaults" property to disable profile use for Skia tools and benchmarks.
cc_defaults {
name: "skia_pgo_no_profile_use",
defaults: [
"skia_pgo",
],
pgo: {
enable_profile_use: false,
},
}
cc_defaults {
name: "skia_deps",
shared_libs: [
"libEGL",
"libGLESv2",
"libdng_sdk",
"libexpat",
"libft2",
"libheif",
"libicui18n",
"libicuuc",
"libjpeg",
"liblog",
"libpiex",
"libpng",
"libvulkan",
"libz",
"libcutils",
"libnativewindow",
],
static_libs: [
"libarect",
"libsfntly",
"libwebp-decode",
"libwebp-encode",
],
group_static_libs: true,
}
cc_defaults {
name: "skia_tool_deps",
defaults: [
"skia_deps",
"skia_pgo_no_profile_use"
],
static_libs: [
"libjsoncpp",
"libskia",
],
cflags: [
"-Wno-unused-parameter",
"-Wno-unused-variable",
],
}
cc_test {
name: "skia_dm",
defaults: [
"skia_tool_deps"
],
local_include_dirs: [
$dm_includes
],
srcs: [
$dm_srcs
],
shared_libs: [
"libbinder",
"libutils",
],
}
cc_test {
name: "skia_nanobench",
defaults: [
"skia_tool_deps"
],
local_include_dirs: [
$nanobench_includes
],
srcs: [
$nanobench_srcs
],
data: [
"resources/*",
],
}''')
# We'll run GN to get the main source lists and include directories for Skia.
gn_args = {
'is_official_build': 'true',
'skia_enable_tools': 'true',
'skia_enable_skottie': 'false', # requires rapidjson third-party
'skia_use_libheif': 'true',
'skia_use_vulkan': 'true',
'target_cpu': '"none"',
'target_os': '"android"',
'skia_vulkan_header': '"Skia_Vulkan_Android.h"',
}
js = gn_to_bp_utils.GenerateJSONFromGN(gn_args)
def strip_slashes(lst):
return {str(p.lstrip('/')) for p in lst}
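# For example (illustrative GN-style labels, not values taken from the JSON above):
#   strip_slashes(["//include/core", "//src/core/SkCanvas.cpp"])
#   -> {"include/core", "src/core/SkCanvas.cpp"}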
srcs = strip_slashes(js['targets']['//:skia']['sources'])
cflags = strip_slashes(js['targets']['//:skia']['cflags'])
cflags_cc = strip_slashes(js['targets']['//:skia']['cflags_cc'])
local_includes = strip_slashes(js['targets']['//:skia']['include_dirs'])
export_includes = strip_slashes(js['targets']['//:public']['include_dirs'])
defines = [str(d) for d in js['targets']['//:skia']['defines']]
dm_srcs = strip_slashes(js['targets']['//:dm']['sources'])
dm_includes = strip_slashes(js['targets']['//:dm']['include_dirs'])
nanobench_target = js['targets']['//:nanobench']
nanobench_srcs = strip_slashes(nanobench_target['sources'])
nanobench_includes = strip_slashes(nanobench_target['include_dirs'])
gn_to_bp_utils.GrabDependentValues(js, '//:skia', 'sources', srcs, None)
gn_to_bp_utils.GrabDependentValues(js, '//:dm', 'sources', dm_srcs, 'skia')
gn_to_bp_utils.GrabDependentValues(js, '//:nanobench', 'sources',
nanobench_srcs, 'skia')
# skcms is a little special, kind of a second-party library.
srcs .add("third_party/skcms/skcms.c")
local_includes.add("third_party/skcms")
dm_includes .add("third_party/skcms")
# No need to list headers.
srcs = {s for s in srcs if not s.endswith('.h')}
dm_srcs = {s for s in dm_srcs if not s.endswith('.h')}
nanobench_srcs = {s for s in nanobench_srcs if not s.endswith('.h')}
cflags = gn_to_bp_utils.CleanupCFlags(cflags)
cflags_cc = gn_to_bp_utils.CleanupCCFlags(cflags_cc)
# We need to add the include path to the vulkan defines and header file set in
# then skia_vulkan_header gn arg that is used for framework builds.
local_includes.add("platform_tools/android/vulkan")
export_includes.add("platform_tools/android/vulkan")
here = os.path.dirname(__file__)
defs = gn_to_bp_utils.GetArchSources(os.path.join(here, 'opts.gni'))
gn_to_bp_utils.WriteUserConfig('include/config/SkUserConfig.h', defines)
# Turn a list of strings into the style bpfmt outputs.
def bpfmt(indent, lst, sort=True):
if sort:
lst = sorted(lst)
return ('\n' + ' '*indent).join('"%s",' % v for v in lst)
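# For example (hypothetical input, shown only to illustrate the formatting):
#   bpfmt(8, ["b.cpp", "a.cpp"]) -> '"a.cpp",\n        "b.cpp",'
# i.e. entries are sorted, quoted, comma-terminated, and continuation lines are
# indented by `indent` spaces.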
# OK! We have everything to fill in Android.bp...
with open('Android.bp', 'w') as f:
print >>f, bp.substitute({
'export_includes': bpfmt(8, export_includes),
'local_includes': bpfmt(8, local_includes),
'srcs': bpfmt(8, srcs),
'cflags': bpfmt(8, cflags, False),
'cflags_cc': bpfmt(8, cflags_cc),
'arm_srcs': bpfmt(16, defs['armv7']),
'arm_neon_srcs': bpfmt(20, defs['neon']),
'arm64_srcs': bpfmt(16, defs['arm64'] +
defs['crc32']),
'none_srcs': bpfmt(16, defs['none']),
'x86_srcs': bpfmt(16, defs['sse2'] +
defs['ssse3'] +
defs['sse41'] +
defs['sse42'] +
defs['avx' ] +
defs['hsw' ]),
'dm_includes' : bpfmt(8, dm_includes),
'dm_srcs' : bpfmt(8, dm_srcs),
'nanobench_includes' : bpfmt(8, nanobench_includes),
'nanobench_srcs' : bpfmt(8, nanobench_srcs),
})
| #!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Generate Android.bp for Skia from GN configuration.
import json
import os
import pprint
import string
import subprocess
import tempfile
import gn_to_bp_utils
# First we start off with a template for Android.bp,
# with holes for source lists and include directories.
bp = string.Template('''// This file is autogenerated by gn_to_bp.py.
cc_library_static {
name: "libskia",
cflags: [
$cflags
],
cppflags:[
$cflags_cc
],
export_include_dirs: [
$export_includes
],
local_include_dirs: [
$local_includes
],
srcs: [
$srcs
],
arch: {
arm: {
srcs: [
$arm_srcs
],
neon: {
srcs: [
$arm_neon_srcs
],
},
},
arm64: {
srcs: [
$arm64_srcs
],
},
mips: {
srcs: [
$none_srcs
],
},
mips64: {
srcs: [
$none_srcs
],
},
x86: {
srcs: [
$x86_srcs
],
cflags: [
// Clang seems to think new/malloc will only be 4-byte aligned
// on x86 Android. We're pretty sure it's actually 8-byte
// alignment. tests/OverAlignedTest.cpp has more information,
// and should fail if we're wrong.
"-Wno-over-aligned"
],
},
x86_64: {
srcs: [
$x86_srcs
],
},
},
defaults: ["skia_deps",
"skia_pgo",
],
}
// Build libskia with PGO by default.
// Location of PGO profile data is defined in build/soong/cc/pgo.go
// and is separate from skia.
// To turn it off, set ANDROID_PGO_NO_PROFILE_USE environment variable
// or set enable_profile_use property to false.
cc_defaults {
name: "skia_pgo",
pgo: {
instrumentation: true,
profile_file: "hwui/hwui.profdata",
benchmarks: ["hwui", "skia"],
enable_profile_use: true,
},
}
// "defaults" property to disable profile use for Skia tools and benchmarks.
cc_defaults {
name: "skia_pgo_no_profile_use",
defaults: [
"skia_pgo",
],
pgo: {
enable_profile_use: false,
},
}
cc_defaults {
name: "skia_deps",
shared_libs: [
"libEGL",
"libGLESv2",
"libdng_sdk",
"libexpat",
"libft2",
"libheif",
"libicui18n",
"libicuuc",
"libjpeg",
"liblog",
"libpiex",
"libpng",
"libvulkan",
"libz",
"libcutils",
"libnativewindow",
],
static_libs: [
"libarect",
"libsfntly",
"libwebp-decode",
"libwebp-encode",
],
group_static_libs: true,
}
cc_defaults {
name: "skia_tool_deps",
defaults: [
"skia_deps",
"skia_pgo_no_profile_use"
],
static_libs: [
"libjsoncpp",
"libskia",
],
cflags: [
"-Wno-unused-parameter",
"-Wno-unused-variable",
],
}
cc_test {
name: "skia_dm",
defaults: [
"skia_tool_deps"
],
local_include_dirs: [
$dm_includes
],
srcs: [
$dm_srcs
],
shared_libs: [
"libbinder",
"libutils",
],
}
cc_test {
name: "skia_nanobench",
defaults: [
"skia_tool_deps"
],
local_include_dirs: [
$nanobench_includes
],
srcs: [
$nanobench_srcs
],
data: [
"resources/*",
],
}''')
# We'll run GN to get the main source lists and include directories for Skia.
gn_args = {
'is_official_build': 'true',
'skia_enable_tools': 'true',
'skia_enable_skottie': 'false', # requires rapidjson third-party
'skia_use_libheif': 'true',
'skia_use_vulkan': 'true',
'target_cpu': '"none"',
'target_os': '"android"',
'skia_vulkan_header': '"Skia_Vulkan_Android.h"',
}
js = gn_to_bp_utils.GenerateJSONFromGN(gn_args)
def strip_slashes(lst):
return {str(p.lstrip('/')) for p in lst}
srcs = strip_slashes(js['targets']['//:skia']['sources'])
cflags = strip_slashes(js['targets']['//:skia']['cflags'])
cflags_cc = strip_slashes(js['targets']['//:skia']['cflags_cc'])
local_includes = strip_slashes(js['targets']['//:skia']['include_dirs'])
export_includes = strip_slashes(js['targets']['//:public']['include_dirs'])
defines = [str(d) for d in js['targets']['//:skia']['defines']]
dm_srcs = strip_slashes(js['targets']['//:dm']['sources'])
dm_includes = strip_slashes(js['targets']['//:dm']['include_dirs'])
nanobench_target = js['targets']['//:nanobench']
nanobench_srcs = strip_slashes(nanobench_target['sources'])
nanobench_includes = strip_slashes(nanobench_target['include_dirs'])
gn_to_bp_utils.GrabDependentValues(js, '//:skia', 'sources', srcs, None)
gn_to_bp_utils.GrabDependentValues(js, '//:dm', 'sources', dm_srcs, 'skia')
gn_to_bp_utils.GrabDependentValues(js, '//:nanobench', 'sources',
nanobench_srcs, 'skia')
# skcms is a little special, kind of a second-party library.
srcs .add("third_party/skcms/skcms.c")
local_includes.add("third_party/skcms")
dm_includes .add("third_party/skcms")
# No need to list headers.
srcs = {s for s in srcs if not s.endswith('.h')}
dm_srcs = {s for s in dm_srcs if not s.endswith('.h')}
nanobench_srcs = {s for s in nanobench_srcs if not s.endswith('.h')}
cflags = gn_to_bp_utils.CleanupCFlags(cflags)
cflags_cc = gn_to_bp_utils.CleanupCCFlags(cflags_cc)
# We need to add the include path to the vulkan defines and header file set in
# then skia_vulkan_header gn arg that is used for framework builds.
local_includes.add("platform_tools/android/vulkan")
export_includes.add("platform_tools/android/vulkan")
here = os.path.dirname(__file__)
defs = gn_to_bp_utils.GetArchSources(os.path.join(here, 'opts.gni'))
gn_to_bp_utils.WriteUserConfig('include/config/SkUserConfig.h', defines)
# Turn a list of strings into the style bpfmt outputs.
def bpfmt(indent, lst, sort=True):
if sort:
lst = sorted(lst)
return ('\n' + ' '*indent).join('"%s",' % v for v in lst)
# OK! We have everything to fill in Android.bp...
with open('Android.bp', 'w') as f:
print >>f, bp.substitute({
'export_includes': bpfmt(8, export_includes),
'local_includes': bpfmt(8, local_includes),
'srcs': bpfmt(8, srcs),
'cflags': bpfmt(8, cflags, False),
'cflags_cc': bpfmt(8, cflags_cc),
'arm_srcs': bpfmt(16, defs['armv7']),
'arm_neon_srcs': bpfmt(20, defs['neon']),
'arm64_srcs': bpfmt(16, defs['arm64'] +
defs['crc32']),
'none_srcs': bpfmt(16, defs['none']),
'x86_srcs': bpfmt(16, defs['sse2'] +
defs['ssse3'] +
defs['sse41'] +
defs['sse42'] +
defs['avx' ] +
defs['hsw' ]),
'dm_includes' : bpfmt(8, dm_includes),
'dm_srcs' : bpfmt(8, dm_srcs),
'nanobench_includes' : bpfmt(8, nanobench_includes),
'nanobench_srcs' : bpfmt(8, nanobench_srcs),
}) | en | 0.606456 | #!/usr/bin/env python # # Copyright 2016 Google Inc. # # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # Generate Android.bp for Skia from GN configuration. # First we start off with a template for Android.bp, # with holes for source lists and include directories. // This file is autogenerated by gn_to_bp.py. cc_library_static { name: "libskia", cflags: [ $cflags ], cppflags:[ $cflags_cc ], export_include_dirs: [ $export_includes ], local_include_dirs: [ $local_includes ], srcs: [ $srcs ], arch: { arm: { srcs: [ $arm_srcs ], neon: { srcs: [ $arm_neon_srcs ], }, }, arm64: { srcs: [ $arm64_srcs ], }, mips: { srcs: [ $none_srcs ], }, mips64: { srcs: [ $none_srcs ], }, x86: { srcs: [ $x86_srcs ], cflags: [ // Clang seems to think new/malloc will only be 4-byte aligned // on x86 Android. We're pretty sure it's actually 8-byte // alignment. tests/OverAlignedTest.cpp has more information, // and should fail if we're wrong. "-Wno-over-aligned" ], }, x86_64: { srcs: [ $x86_srcs ], }, }, defaults: ["skia_deps", "skia_pgo", ], } // Build libskia with PGO by default. // Location of PGO profile data is defined in build/soong/cc/pgo.go // and is separate from skia. // To turn it off, set ANDROID_PGO_NO_PROFILE_USE environment variable // or set enable_profile_use property to false. cc_defaults { name: "skia_pgo", pgo: { instrumentation: true, profile_file: "hwui/hwui.profdata", benchmarks: ["hwui", "skia"], enable_profile_use: true, }, } // "defaults" property to disable profile use for Skia tools and benchmarks. cc_defaults { name: "skia_pgo_no_profile_use", defaults: [ "skia_pgo", ], pgo: { enable_profile_use: false, }, } cc_defaults { name: "skia_deps", shared_libs: [ "libEGL", "libGLESv2", "libdng_sdk", "libexpat", "libft2", "libheif", "libicui18n", "libicuuc", "libjpeg", "liblog", "libpiex", "libpng", "libvulkan", "libz", "libcutils", "libnativewindow", ], static_libs: [ "libarect", "libsfntly", "libwebp-decode", "libwebp-encode", ], group_static_libs: true, } cc_defaults { name: "skia_tool_deps", defaults: [ "skia_deps", "skia_pgo_no_profile_use" ], static_libs: [ "libjsoncpp", "libskia", ], cflags: [ "-Wno-unused-parameter", "-Wno-unused-variable", ], } cc_test { name: "skia_dm", defaults: [ "skia_tool_deps" ], local_include_dirs: [ $dm_includes ], srcs: [ $dm_srcs ], shared_libs: [ "libbinder", "libutils", ], } cc_test { name: "skia_nanobench", defaults: [ "skia_tool_deps" ], local_include_dirs: [ $nanobench_includes ], srcs: [ $nanobench_srcs ], data: [ "resources/*", ], } # We'll run GN to get the main source lists and include directories for Skia. # requires rapidjson third-party # skcms is a little special, kind of a second-party library. # No need to list headers. # We need to add the include path to the vulkan defines and header file set in # then skia_vulkan_header gn arg that is used for framework builds. # Turn a list of strings into the style bpfmt outputs. # OK! We have everything to fill in Android.bp... | 1.998485 | 2 |
python/ray/autoscaler/tags.py | firebolt55439/ray | 21,382 | 486 | """The Ray autoscaler uses tags/labels to associate metadata with instances."""
# Tag for the name of the node
TAG_RAY_NODE_NAME = "ray-node-name"
# Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag
# value says 'type' instead of 'kind'.
TAG_RAY_NODE_KIND = "ray-node-type"
NODE_KIND_HEAD = "head"
NODE_KIND_WORKER = "worker"
NODE_KIND_UNMANAGED = "unmanaged"
# Tag for user defined node types (e.g., m4xl_spot). This is used for multi
# node type clusters.
TAG_RAY_USER_NODE_TYPE = "ray-user-node-type"
# Tag for autofilled node types for legacy cluster yamls without multi
# node type defined in the cluster configs.
NODE_TYPE_LEGACY_HEAD = "ray-legacy-head-node-type"
NODE_TYPE_LEGACY_WORKER = "ray-legacy-worker-node-type"
# Tag that reports the current state of the node (e.g. Updating, Up-to-date)
TAG_RAY_NODE_STATUS = "ray-node-status"
STATUS_UNINITIALIZED = "uninitialized"
STATUS_WAITING_FOR_SSH = "waiting-for-ssh"
STATUS_SYNCING_FILES = "syncing-files"
STATUS_SETTING_UP = "setting-up"
STATUS_UPDATE_FAILED = "update-failed"
STATUS_UP_TO_DATE = "up-to-date"
# Tag uniquely identifying all nodes of a cluster
TAG_RAY_CLUSTER_NAME = "ray-cluster-name"
# Hash of the node launch config, used to identify out-of-date nodes
TAG_RAY_LAUNCH_CONFIG = "ray-launch-config"
# Hash of the node runtime config, used to determine if updates are needed
TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config"
# Hash of the contents of the directories specified by the file_mounts config
# if the node is a worker, this also hashes content of the directories
# specified by the cluster_synced_files config
TAG_RAY_FILE_MOUNTS_CONTENTS = "ray-file-mounts-contents"
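# Illustrative sketch (not part of the original module): these constants are
# combined into per-node tag dictionaries, e.g. something along these lines,
# where the cluster name is a placeholder:
#
#   worker_tags = {
#       TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
#       TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
#       TAG_RAY_CLUSTER_NAME: "example-cluster",
#   }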
| """The Ray autoscaler uses tags/labels to associate metadata with instances."""
# Tag for the name of the node
TAG_RAY_NODE_NAME = "ray-node-name"
# Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag
# value says 'type' instead of 'kind'.
TAG_RAY_NODE_KIND = "ray-node-type"
NODE_KIND_HEAD = "head"
NODE_KIND_WORKER = "worker"
NODE_KIND_UNMANAGED = "unmanaged"
# Tag for user defined node types (e.g., m4xl_spot). This is used for multi
# node type clusters.
TAG_RAY_USER_NODE_TYPE = "ray-user-node-type"
# Tag for autofilled node types for legacy cluster yamls without multi
# node type defined in the cluster configs.
NODE_TYPE_LEGACY_HEAD = "ray-legacy-head-node-type"
NODE_TYPE_LEGACY_WORKER = "ray-legacy-worker-node-type"
# Tag that reports the current state of the node (e.g. Updating, Up-to-date)
TAG_RAY_NODE_STATUS = "ray-node-status"
STATUS_UNINITIALIZED = "uninitialized"
STATUS_WAITING_FOR_SSH = "waiting-for-ssh"
STATUS_SYNCING_FILES = "syncing-files"
STATUS_SETTING_UP = "setting-up"
STATUS_UPDATE_FAILED = "update-failed"
STATUS_UP_TO_DATE = "up-to-date"
# Tag uniquely identifying all nodes of a cluster
TAG_RAY_CLUSTER_NAME = "ray-cluster-name"
# Hash of the node launch config, used to identify out-of-date nodes
TAG_RAY_LAUNCH_CONFIG = "ray-launch-config"
# Hash of the node runtime config, used to determine if updates are needed
TAG_RAY_RUNTIME_CONFIG = "ray-runtime-config"
# Hash of the contents of the directories specified by the file_mounts config
# if the node is a worker, this also hashes content of the directories
# specified by the cluster_synced_files config
TAG_RAY_FILE_MOUNTS_CONTENTS = "ray-file-mounts-contents"
| en | 0.759492 | The Ray autoscaler uses tags/labels to associate metadata with instances. # Tag for the name of the node # Tag for the kind of node (e.g. Head, Worker). For legacy reasons, the tag # value says 'type' instead of 'kind'. # Tag for user defined node types (e.g., m4xl_spot). This is used for multi # node type clusters. # Tag for autofilled node types for legacy cluster yamls without multi # node type defined in the cluster configs. # Tag that reports the current state of the node (e.g. Updating, Up-to-date) # Tag uniquely identifying all nodes of a cluster # Hash of the node launch config, used to identify out-of-date nodes # Hash of the node runtime config, used to determine if updates are needed # Hash of the contents of the directories specified by the file_mounts config # if the node is a worker, this also hashes content of the directories # specified by the cluster_synced_files config | 1.769897 | 2 |
tests/generation_test.py | stefan-feltmann/lands | 0 | 487 | import unittest
from worldengine.plates import Step, center_land, world_gen
from worldengine.world import World
from tests.draw_test import TestBase
class TestGeneration(TestBase):
def setUp(self):
super(TestGeneration, self).setUp()
def test_world_gen_does_not_explode_badly(self):
# FIXME remove me when proper tests are in place
# Very stupid test that just verify nothing explode badly
world_gen("Dummy", 32, 16, 1, step=Step.get_by_name("full"))
@staticmethod
def _mean_elevation_at_borders(world):
borders_total_elevation = 0.0
for y in range(world.height):
borders_total_elevation += world.elevation_at((0, y))
borders_total_elevation += world.elevation_at((world.width - 1, y))
for x in range(1, world.width - 1):
borders_total_elevation += world.elevation_at((x, 0))
borders_total_elevation += world.elevation_at((x, world.height - 1))
n_cells_on_border = world.width * 2 + world.height * 2 - 4
return borders_total_elevation / n_cells_on_border
def test_center_land(self):
w = World.from_pickle_file("%s/plates_279.world" % self.tests_data_dir)
# We want to have less land than before at the borders
el_before = TestGeneration._mean_elevation_at_borders(w)
center_land(w)
el_after = TestGeneration._mean_elevation_at_borders(w)
self.assertTrue(el_after <= el_before)
if __name__ == '__main__':
unittest.main()
| import unittest
from worldengine.plates import Step, center_land, world_gen
from worldengine.world import World
from tests.draw_test import TestBase
class TestGeneration(TestBase):
def setUp(self):
super(TestGeneration, self).setUp()
def test_world_gen_does_not_explode_badly(self):
# FIXME remove me when proper tests are in place
# Very stupid test that just verify nothing explode badly
world_gen("Dummy", 32, 16, 1, step=Step.get_by_name("full"))
@staticmethod
def _mean_elevation_at_borders(world):
borders_total_elevation = 0.0
for y in range(world.height):
borders_total_elevation += world.elevation_at((0, y))
borders_total_elevation += world.elevation_at((world.width - 1, y))
for x in range(1, world.width - 1):
borders_total_elevation += world.elevation_at((x, 0))
borders_total_elevation += world.elevation_at((x, world.height - 1))
n_cells_on_border = world.width * 2 + world.height * 2 - 4
return borders_total_elevation / n_cells_on_border
def test_center_land(self):
w = World.from_pickle_file("%s/plates_279.world" % self.tests_data_dir)
# We want to have less land than before at the borders
el_before = TestGeneration._mean_elevation_at_borders(w)
center_land(w)
el_after = TestGeneration._mean_elevation_at_borders(w)
self.assertTrue(el_after <= el_before)
if __name__ == '__main__':
unittest.main()
| en | 0.925367 | # FIXME remove me when proper tests are in place # Very stupid test that just verify nothing explode badly # We want to have less land than before at the borders | 2.755474 | 3 |
tests/test_models/test_components/test_discriminators/test_light_cnn.py | ChenShuwei1001/mmediting | 0 | 488 | <filename>tests/test_models/test_components/test_discriminators/test_light_cnn.py
import pytest
import torch
from mmedit.models.builder import build_component
from mmedit.models.components.discriminators.light_cnn import MaxFeature
def test_max_feature():
# cpu
conv2d = MaxFeature(16, 16, filter_type='conv2d')
x1 = torch.rand(3, 16, 16, 16)
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
linear = MaxFeature(16, 16, filter_type='linear')
x2 = torch.rand(3, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# gpu
if torch.cuda.is_available():
x1 = x1.cuda()
x2 = x2.cuda()
conv2d = conv2d.cuda()
linear = linear.cuda()
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# filter_type should be conv2d or linear
with pytest.raises(ValueError):
MaxFeature(12, 12, filter_type='conv1d')
def test_light_cnn():
cfg = dict(type='LightCNN', in_channels=3)
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
inputs = torch.rand((2, 3, 128, 128))
output = net(inputs)
assert output.shape == (2, 1)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(inputs.cuda())
assert output.shape == (2, 1)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
| <filename>tests/test_models/test_components/test_discriminators/test_light_cnn.py
import pytest
import torch
from mmedit.models.builder import build_component
from mmedit.models.components.discriminators.light_cnn import MaxFeature
def test_max_feature():
# cpu
conv2d = MaxFeature(16, 16, filter_type='conv2d')
x1 = torch.rand(3, 16, 16, 16)
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
linear = MaxFeature(16, 16, filter_type='linear')
x2 = torch.rand(3, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# gpu
if torch.cuda.is_available():
x1 = x1.cuda()
x2 = x2.cuda()
conv2d = conv2d.cuda()
linear = linear.cuda()
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# filter_type should be conv2d or linear
with pytest.raises(ValueError):
MaxFeature(12, 12, filter_type='conv1d')
def test_light_cnn():
cfg = dict(type='LightCNN', in_channels=3)
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
inputs = torch.rand((2, 3, 128, 128))
output = net(inputs)
assert output.shape == (2, 1)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(inputs.cuda())
assert output.shape == (2, 1)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
| en | 0.734634 | # cpu # gpu # filter_type should be conv2d or linear # cpu # gpu # pretrained should be str or None | 2.349393 | 2 |
src/consumer.py | ssichynskyi/web_metrics_posting | 0 | 489 | <filename>src/consumer.py
import json
import logging
from typing import Iterable
from kafka import KafkaConsumer
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# I've used this example:
# https://github.com/aiven/aiven-examples/blob/master/kafka/python/consumer_example.py
# as well as Aiven Kafka tutorials
class Consumer:
GROUP_ID = 'web_metrics_consumer'
CLIENT_ID = 'website-monitoring-consumer-service'
def __init__(
self,
*topics,
**connection_kwargs
):
"""Class for creating Kafka consumer.
Args:
*topics - topics to subscribe to. Could be changed during lifetime, str
**connection_kwargs - keyword arguments as taken by KafkaConsumer
below there are some useful kwargs and their default value:
'bootstrap_servers' - uri with port for the service
'security_protocol' - SSL, SASL_PLAINTEXT, etc
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None
Note:
although all params are optional, at least
'sasl_plain_username' and 'sasl_plain_password'
or
            'ssl_cafile', 'ssl_certfile' and 'ssl_keyfile'
or other certificate-related inputs shall be defined
Usage:
Connection is activated not on object instantiation but
when entering with statement. e.g.:
consumer = Consumer(...)
with consumer:
                consumer.fetch_latest()
"""
self._topics = topics
self._connection_data = connection_kwargs
# auto-determine security protocol if not provided
try:
self._connection_data['security_protocol']
except KeyError:
username_given = 'sasl_plain_username' in self._connection_data.keys()
            password_given = 'sasl_plain_password' in self._connection_data.keys()
ca_file_given = 'ssl_cafile' in self._connection_data.keys()
service_cert_given = 'ssl_certfile' in self._connection_data.keys()
service_key_given = 'ssl_keyfile' in self._connection_data.keys()
if all((ca_file_given, service_cert_given, service_key_given)):
self._connection_data['security_protocol'] = 'SSL'
elif username_given and password_given:
self._connection_data['security_protocol'] = 'SASL_PLAINTEXT'
else:
msg = 'Security protocol not provided and cannot be determined automatically.'
msg = f'{msg} Check auth kwargs'
raise ValueError(msg)
self._client_id = f'{self.CLIENT_ID}:{id(self)}'
def __enter__(self):
"""Method which creates the connection. Activated inside with statement."""
self._consumer = KafkaConsumer(
*self._topics,
**self._connection_data,
auto_offset_reset='earliest',
enable_auto_commit=False,
client_id=self._client_id,
group_id=self.GROUP_ID,
consumer_timeout_ms=1000,
value_deserializer=lambda x: json.loads(x.decode("utf-8"))
)
log.info(f'Connected to kafka broker at: {self._consumer.config["bootstrap_servers"]}')
def fetch_latest(self):
"""Fetches only not read messages by members of this group.
Returns:
list of decoded message values
"""
self._consumer.poll()
messages = list()
for message in self._consumer:
messages.append(message.value)
log.info(
f'Fetched {len(messages)} messages from {self._consumer.config["bootstrap_servers"]}'
)
self._consumer.commit()
return messages
def change_topics(self, topics: Iterable) -> None:
"""Changes Kafka consumer topic statically or dynamically
Args:
topics: any iterable: set, list, tuple
Returns:
None
"""
topics = tuple(topics)
try:
self._consumer.unsubscribe()
self._consumer.subscribe(list(topics))
except AttributeError:
# when topics are changed in inactive consumer i.e. not inside `with` statement
self._topics = topics
def __exit__(self, exc_type, exc_value, traceback):
"""Actions to perform when exiting with statement."""
        self._consumer.close()
        log.info(
            f'Closed connection to kafka broker at: {self._consumer.config["bootstrap_servers"]}'
        )
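# Minimal usage sketch (illustrative only): the topic name, broker address and
# certificate paths below are placeholders, not values from this project.
if __name__ == '__main__':
    example_consumer = Consumer(
        'website-metrics',  # hypothetical topic
        bootstrap_servers='kafka.example.com:12345',
        ssl_cafile='ca.pem',
        ssl_certfile='service.cert',
        ssl_keyfile='service.key',
    )
    with example_consumer:
        for message in example_consumer.fetch_latest():
            print(message)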
| <filename>src/consumer.py
import json
import logging
from typing import Iterable
from kafka import KafkaConsumer
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# I've used this example:
# https://github.com/aiven/aiven-examples/blob/master/kafka/python/consumer_example.py
# as well as Aiven Kafka tutorials
class Consumer:
GROUP_ID = 'web_metrics_consumer'
CLIENT_ID = 'website-monitoring-consumer-service'
def __init__(
self,
*topics,
**connection_kwargs
):
"""Class for creating Kafka consumer.
Args:
*topics - topics to subscribe to. Could be changed during lifetime, str
**connection_kwargs - keyword arguments as taken by KafkaConsumer
below there are some useful kwargs and their default value:
'bootstrap_servers' - uri with port for the service
'security_protocol' - SSL, SASL_PLAINTEXT, etc
'sasl_mechanism': None,
'sasl_plain_username': None,
'sasl_plain_password': None,
'ssl_cafile': None,
'ssl_certfile': None,
'ssl_keyfile': None
Note:
although all params are optional, at least
'sasl_plain_username' and 'sasl_plain_password'
or
            'ssl_cafile', 'ssl_certfile' and 'ssl_keyfile'
or other certificate-related inputs shall be defined
Usage:
Connection is activated not on object instantiation but
when entering with statement. e.g.:
consumer = Consumer(...)
with consumer:
                consumer.fetch_latest()
"""
self._topics = topics
self._connection_data = connection_kwargs
# auto-determine security protocol if not provided
try:
self._connection_data['security_protocol']
except KeyError:
username_given = 'sasl_plain_username' in self._connection_data.keys()
            password_given = 'sasl_plain_password' in self._connection_data.keys()
ca_file_given = 'ssl_cafile' in self._connection_data.keys()
service_cert_given = 'ssl_certfile' in self._connection_data.keys()
service_key_given = 'ssl_keyfile' in self._connection_data.keys()
if all((ca_file_given, service_cert_given, service_key_given)):
self._connection_data['security_protocol'] = 'SSL'
elif username_given and password_given:
self._connection_data['security_protocol'] = 'SASL_PLAINTEXT'
else:
msg = 'Security protocol not provided and cannot be determined automatically.'
msg = f'{msg} Check auth kwargs'
raise ValueError(msg)
self._client_id = f'{self.CLIENT_ID}:{id(self)}'
def __enter__(self):
"""Method which creates the connection. Activated inside with statement."""
self._consumer = KafkaConsumer(
*self._topics,
**self._connection_data,
auto_offset_reset='earliest',
enable_auto_commit=False,
client_id=self._client_id,
group_id=self.GROUP_ID,
consumer_timeout_ms=1000,
value_deserializer=lambda x: json.loads(x.decode("utf-8"))
)
log.info(f'Connected to kafka broker at: {self._consumer.config["bootstrap_servers"]}')
def fetch_latest(self):
"""Fetches only not read messages by members of this group.
Returns:
list of decoded message values
"""
self._consumer.poll()
messages = list()
for message in self._consumer:
messages.append(message.value)
log.info(
f'Fetched {len(messages)} messages from {self._consumer.config["bootstrap_servers"]}'
)
self._consumer.commit()
return messages
def change_topics(self, topics: Iterable) -> None:
"""Changes Kafka consumer topic statically or dynamically
Args:
topics: any iterable: set, list, tuple
Returns:
None
"""
topics = tuple(topics)
try:
self._consumer.unsubscribe()
self._consumer.subscribe(list(topics))
except AttributeError:
# when topics are changed in inactive consumer i.e. not inside `with` statement
self._topics = topics
def __exit__(self, exc_type, exc_value, traceback):
"""Actions to perform when exiting with statement."""
        self._consumer.close()
        log.info(
            f'Closed connection to kafka broker at: {self._consumer.config["bootstrap_servers"]}'
        )
| en | 0.762099 | # I've used this example: # https://github.com/aiven/aiven-examples/blob/master/kafka/python/consumer_example.py # as well as Aiven Kafka tutorials Class for creating Kafka consumer. Args: *topics - topics to subscribe to. Could be changed during lifetime, str **connection_kwargs - keyword arguments as taken by KafkaConsumer below there are some useful kwargs and their default value: 'bootstrap_servers' - uri with port for the service 'security_protocol' - SSL, SASL_PLAINTEXT, etc 'sasl_mechanism': None, 'sasl_plain_username': None, 'sasl_plain_password': None, 'ssl_cafile': None, 'ssl_certfile': None, 'ssl_keyfile': None Note: although all params are optional, at least 'sasl_plain_username' and 'sasl_plain_password' or 'ssl_cafile', 'ssl_certfile' and 'ssl_keyfile or other certificate-related inputs shall be defined Usage: Connection is activated not on object instantiation but when entering with statement. e.g.: consumer = Consumer(...) with consumer: consumer.send(...) # auto-determine security protocol if not provided Method which creates the connection. Activated inside with statement. Fetches only not read messages by members of this group. Returns: list of decoded message values Changes Kafka consumer topic statically or dynamically Args: topics: any iterable: set, list, tuple Returns: None # when topics are changed in inactive consumer i.e. not inside `with` statement Actions to perform when exiting with statement. | 2.613389 | 3 |
pp2_model.py | BetterManlinfeng/hyperclasspptwo | 0 | 490 |
from tensorflow.keras import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential,regularizers
from tensorflow.keras.layers import Dropout
# from tensorflow.keras import *
# Define a 3x3 convolution factory! kernel_initializer options: 'he_normal', 'glorot_normal'
from tensorflow.python.keras.layers import Concatenate
def regularized_padded_conv(*args, **kwargs):
return layers.Conv2D(*args, **kwargs, padding='same', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(5e-4))
############################### Channel attention module ###############################
class ChannelAttention(layers.Layer):
def __init__(self, in_planes, ratio=8):
super(ChannelAttention, self).__init__()
self.avg= layers.GlobalAveragePooling2D()
self.max= layers.GlobalMaxPooling2D()
self.conv1 = layers.Conv2D(in_planes//ratio, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu)
self.conv2 = layers.Conv2D(in_planes, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True)
def call(self, inputs):
avg = self.avg(inputs)
max = self.max(inputs)
avg = layers.Reshape((1, 1, avg.shape[1]))(avg) # shape (None, 1, 1 feature)
max = layers.Reshape((1, 1, max.shape[1]))(max) # shape (None, 1, 1 feature)
avg_out = self.conv2(self.conv1(avg))
max_out = self.conv2(self.conv1(max))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
return out
############################### Spatial attention module ###############################
class SpatialAttention(layers.Layer):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = regularized_padded_conv(1, kernel_size=kernel_size, strides=1, activation=tf.nn.sigmoid)
def call(self, inputs):
avg_out = tf.reduce_mean(inputs, axis=3)
max_out = tf.reduce_max(inputs, axis=3)
        out = tf.stack([avg_out, max_out], axis=3)             # stack along a new axis to concatenate the avg/max maps
out = self.conv1(out)
return out
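# Note on the two modules above: they follow the CBAM-style pattern of squeezing
# the feature map into a sigmoid gate; BasicBlock below applies the gates
# multiplicatively (out = self.ca(out) * out, then out = self.sa(out) * out)
# before the residual addition.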
class BasicBlock(layers.Layer):
def __init__(self, filter_num, stride=1):
super(BasicBlock, self).__init__()
# self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same', kernel_initializer='he_normal',kernel_regularizer=keras.regularizers.l2(5e-4))
self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same',kernel_regularizer=regularizers.l2(0.0001)) #kernel_initializer='he_normal',
self.bn1 = layers.BatchNormalization()
self.relu = layers.Activation('relu')
self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same',kernel_regularizer=regularizers.l2(0.0001))
self.bn2 = layers.BatchNormalization()
        ############################### attention mechanism ###############################
self.ca = ChannelAttention(filter_num)
self.sa = SpatialAttention()
if stride != 1:
self.downsample = Sequential()
self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
else:
self.downsample = lambda x:x
def call(self, inputs, training=None):
# [b, h, w, c]
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
        ############################### attention mechanism ###############################
out = self.ca(out) * out
out = self.sa(out) * out
identity = self.downsample(inputs)
output = layers.add([out, identity])
output = tf.nn.relu(output)
return output
######################################
class build_resblock(keras.Model):
    # Standalone stack of BasicBlocks (unused in this module; ResNet/pp2_model
    # build their stages with their own build_resblock() methods below).
    def __init__(self, filter_num, blocks, stride=1):
        super(build_resblock, self).__init__()
        self.res_blocks = Sequential()
        # the first block may down-sample, the remaining blocks keep stride 1
        self.res_blocks.add(BasicBlock(filter_num, stride))
        for _ in range(1, blocks):
            self.res_blocks.add(BasicBlock(filter_num, stride=1))
    def call(self, inputs, training=None):
        return self.res_blocks(inputs, training=training)
######################################
class ResNet(keras.Model):
def __init__(self, layer_dims, num_classes=16): # [2, 2, 2, 2]
super(ResNet, self).__init__()
self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
])
self.layer1 = self.build_resblock(64, layer_dims[0])
self.layer2 = self.build_resblock(128, layer_dims[1], stride=1)
self.layer3 = self.build_resblock(256, layer_dims[2], stride=1)
self.layer4 = self.build_resblock(512, layer_dims[3], stride=1)
# output: [b, 512, h, w],
self.avgpool = layers.GlobalAveragePooling2D()
self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
x = self.stem(inputs)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def resnet18():
return ResNet([2, 2, 2, 2],num_classes=9)
def resnet34():
return ResNet([3, 4, 6, 3],num_classes=9)
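# Quick smoke test (illustrative only): the 32x32x3 input shape is an assumption,
# not a requirement of the model; it simply verifies the forward pass and the
# (batch, 9) output implied by num_classes=9.
def _resnet_smoke_test():
    model = resnet18()
    logits = model(tf.random.normal([4, 32, 32, 3]))
    return logits.shape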
########################### pp2 main model ########################################
class pp2_model(keras.Model):
def __init__(self,filters_num,layer_dims,num_classes,dropout_rate):
super(pp2_model, self).__init__()
self.conv1 = layers.Conv3D(filters_num[0],kernel_size=(3,3,7),padding='same') # filters_num = 8
self.bn1 = layers.BatchNormalization()
self.relu1 = layers.Activation('relu')
self.conv2 = layers.Conv3D(filters_num[1],kernel_size=(3,3,5),padding='same') # filters_num = 16
self.bn2 = layers.BatchNormalization()
self.relu2 = layers.Activation('relu')
self.conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3), padding='same') # filters_num = 32
self.bn3 = layers.BatchNormalization()
self.relu3 = layers.Activation('relu')
# self.reshape = layers.Reshape()
self.conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same') # filters_num = 64
self.bn4 = layers.BatchNormalization()
self.relu4 = layers.Activation('relu')
self.conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same') # filters_num = **
self.bn5 = layers.BatchNormalization()
self.relu5 = layers.Activation('relu')
self.dpout = layers.Dropout(dropout_rate)
self.layer1 = self.build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
# output: [b, 512, h, w],
# self.fc1 = layers.Flatten()
self.avgpool = layers.GlobalAveragePooling2D()
self.fc2 = layers.Dense(filters_num[7],activation='relu')
self.fc3 = layers.Dense(filters_num[6],activation='relu')
self.fc4 = layers.Dense(num_classes)
def call(self,inputs,training=None):
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu3(out)
# reshape
out = layers.Reshape((out.shape[1],out.shape[2],out.shape[3] * out.shape[4]))(out)
out = self.conv4(out)
out = self.bn4(out)
out = self.relu4(out)
out = self.dpout(out)
out = self.conv5(out)
out = self.bn5(out)
out = self.dpout(out)
out = self.relu5(out)
x = self.layer1(out)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc2(x)
x = self.dpout(x)
x = self.fc3(x)
x = self.fc4(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
class ResNet_block(keras.Model):
def __init__(self, layer_dims,filters_num): # [2, 2, 2, 2]
super(ResNet_block, self).__init__()
#
# self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
# layers.BatchNormalization(),
# layers.Activation('relu'),
# layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
# ])
self.layer1 = self.build_resblock(filters_num[0], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[1], layer_dims[1], stride=1) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[2], layer_dims[2], stride=1) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[3], layer_dims[3], stride=1) # filters_num = 512
# output: [b, 512, h, w],
# self.avgpool = layers.GlobalAveragePooling2D()
# self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
# x = self.stem(inputs)
x1 = self.layer1(inputs)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
# [b, c]
# x = self.avgpool(x)
# [b, 100]
# x = self.fc(x)
return x2,x4
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def network_up(input_layer_up,filters_num,dropout_rate,Block_res):
# input_layer = Input(input_shape)
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 7), padding='same')(input_layer) # filters_num = 8
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3),padding='same',kernel_initializer='he_normal',kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # filters_num = 8
conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3), padding='same',
kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) #kernel_initializer='he_normal',
# conv_layer1m = tf.keras.layers.MaxPooling3D(pool_size=(1, 1, 1),padding='same')(conv1)
# conv_layer1g = tf.keras.layers.GlobalMaxPooling3D()(conv1)
conv1_bn = layers.BatchNormalization()(conv1)
conv1_relu = layers.Activation('relu')(conv1_bn)
# conv1_relu = Dropout(0.5)(conv1_relu)
# conv1_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv1_relu)
# conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv1_relu) # filters_num = 16
conv2_bn = layers.BatchNormalization()(conv2)
conv2_relu = layers.Activation('relu')(conv2_bn)
# conv2_relu = Dropout(0.5)(conv2_relu)
# conv2_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv2_relu)
conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv2_relu) # filters_num = 32
conv3_bn = layers.BatchNormalization()(conv3)
conv3_relu = layers.Activation('relu')(conv3_bn)
# conv3_relu = Dropout(0.5)(conv3_relu)
# conv3_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv3_relu)
conv3_relu_reshape = layers.Reshape((conv3_relu.shape[1],conv3_relu.shape[2],conv3_relu.shape[3]*conv3_relu.shape[4]))(conv3_relu)
    conv3_relu_reshape = Dropout(0.5)(conv3_relu_reshape)  # note: fixed rate 0.5 here; the dropout_rate argument is not used
    ################## second scale branch #########################
# conv11 = layers.Conv3D(filters_num[0], kernel_size=(5, 5, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv11_bn = layers.BatchNormalization()(conv11)
# conv11_relu = layers.Activation('relu')(conv11_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv22 = layers.Conv3D(filters_num[1], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv11_relu) # filters_num = 16
# conv22_bn = layers.BatchNormalization()(conv22)
# conv22_relu = layers.Activation('relu')(conv22_bn)
#
# conv33 = layers.Conv3D(filters_num[2], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv22_relu) # filters_num = 32
# conv33_bn = layers.BatchNormalization()(conv33)
# conv33_relu = layers.Activation('relu')(conv33_bn)
#
# conv33_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv33_relu)
####################################################
# conv111 = layers.Conv3D(filters_num[0], kernel_size=(7, 7, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv111_bn = layers.BatchNormalization()(conv111)
# conv111_relu = layers.Activation('relu')(conv111_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv222 = layers.Conv3D(filters_num[1], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv111_relu) # filters_num = 16
# conv222_bn = layers.BatchNormalization()(conv222)
# conv222_relu = layers.Activation('relu')(conv222_bn)
#
# conv333 = layers.Conv3D(filters_num[2], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv222_relu) # filters_num = 32
# conv333_bn = layers.BatchNormalization()(conv333)
# conv333_relu = layers.Activation('relu')(conv333_bn)
#
# conv333_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv333_relu)
#################concatenate########################
# conv33333_relu_reshape = Concatenate(axis=-1)([conv3_relu_reshape, conv33_relu_reshape])
#########################################
conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv3_relu_reshape) # filters_num = 64
conv4_bn = layers.BatchNormalization()(conv4)
conv4_relu = layers.Activation('relu')(conv4_bn)
# conv4_relu = Dropout(0.5)(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv4_relu)
conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv4_relu) # filters_num = **
conv5_bn = layers.BatchNormalization()(conv5)
conv5_relu = layers.Activation('relu')(conv5_bn)
# conv5_relu = Dropout(0.5)(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv5_relu)
# conv5_dpout = layers.Dropout(dropout_rate)(conv5)
# conv5_reshape = layers.Reshape((conv5_dpout.shape[1],conv5_dpout.shape[2],conv5_dpout.shape[3]))(conv5_dpout)
outputs2,outputs4 = Block_res(conv5_relu)
return conv5,outputs2,outputs4
# layer1 = build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
# layer2 = build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
# layer3 = build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
# layer4 = build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
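# Example wiring of the functional pieces above into a complete classifier
# (illustrative sketch only). The patch size (9x9), band count (30), filter
# counts and class count are assumptions chosen for demonstration; they are
# not taken from the original experiments.
def _build_pp2_functional_example():
    inputs = keras.Input(shape=(9, 9, 30, 1))                     # [h, w, bands, 1], assumed
    res_blocks = ResNet_block(layer_dims=[2, 2, 2, 2],
                              filters_num=[64, 128, 256, 512])
    conv5, feat2, feat4 = network_up(inputs, filters_num=[8, 16, 32, 64, 64],
                                     dropout_rate=0.5, Block_res=res_blocks)
    pooled = layers.GlobalAveragePooling2D()(feat4)               # only feat4 used here
    outputs = layers.Dense(9)(pooled)                             # 9 classes assumed
    return keras.Model(inputs, outputs)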
|
from tensorflow.keras import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential,regularizers
from tensorflow.keras.layers import Dropout
# from tensorflow.keras import *
# Define a 3x3 convolution! kernel_initializer='he_normal','glorot_normal'
from tensorflow.python.keras.layers import Concatenate
def regularized_padded_conv(*args, **kwargs):
return layers.Conv2D(*args, **kwargs, padding='same', use_bias=False,
kernel_initializer='he_normal',
kernel_regularizer=regularizers.l2(5e-4))
############################### Channel attention mechanism ###############################
class ChannelAttention(layers.Layer):
def __init__(self, in_planes, ratio=8):
super(ChannelAttention, self).__init__()
self.avg= layers.GlobalAveragePooling2D()
self.max= layers.GlobalMaxPooling2D()
self.conv1 = layers.Conv2D(in_planes//ratio, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True, activation=tf.nn.relu)
self.conv2 = layers.Conv2D(in_planes, kernel_size=1, strides=1, padding='same',
kernel_regularizer=regularizers.l2(5e-4),
use_bias=True)
def call(self, inputs):
avg = self.avg(inputs)
max = self.max(inputs)
avg = layers.Reshape((1, 1, avg.shape[1]))(avg) # shape (None, 1, 1 feature)
max = layers.Reshape((1, 1, max.shape[1]))(max) # shape (None, 1, 1 feature)
avg_out = self.conv2(self.conv1(avg))
max_out = self.conv2(self.conv1(max))
out = avg_out + max_out
out = tf.nn.sigmoid(out)
return out
############################### Spatial attention mechanism ###############################
class SpatialAttention(layers.Layer):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = regularized_padded_conv(1, kernel_size=kernel_size, strides=1, activation=tf.nn.sigmoid)
def call(self, inputs):
avg_out = tf.reduce_mean(inputs, axis=3)
max_out = tf.reduce_max(inputs, axis=3)
        out = tf.stack([avg_out, max_out], axis=3)             # create a new dimension and concatenate the two maps along it.
out = self.conv1(out)
return out
class BasicBlock(layers.Layer):
def __init__(self, filter_num, stride=1):
super(BasicBlock, self).__init__()
# self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same', kernel_initializer='he_normal',kernel_regularizer=keras.regularizers.l2(5e-4))
self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same',kernel_regularizer=regularizers.l2(0.0001)) #kernel_initializer='he_normal',
self.bn1 = layers.BatchNormalization()
self.relu = layers.Activation('relu')
self.conv2 = layers.Conv2D(filter_num, (3, 3), strides=1, padding='same',kernel_regularizer=regularizers.l2(0.0001))
self.bn2 = layers.BatchNormalization()
        ############################### attention mechanism ###############################
self.ca = ChannelAttention(filter_num)
self.sa = SpatialAttention()
if stride != 1:
self.downsample = Sequential()
self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
else:
self.downsample = lambda x:x
def call(self, inputs, training=None):
# [b, h, w, c]
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
        ############################### attention mechanism ###############################
out = self.ca(out) * out
out = self.sa(out) * out
identity = self.downsample(inputs)
output = layers.add([out, identity])
output = tf.nn.relu(output)
return output
######################################
class build_resblock(keras.Model):
    # Standalone stack of BasicBlocks (unused in this module; ResNet/pp2_model
    # build their stages with their own build_resblock() methods below).
    def __init__(self, filter_num, blocks, stride=1):
        super(build_resblock, self).__init__()
        self.res_blocks = Sequential()
        # the first block may down-sample, the remaining blocks keep stride 1
        self.res_blocks.add(BasicBlock(filter_num, stride))
        for _ in range(1, blocks):
            self.res_blocks.add(BasicBlock(filter_num, stride=1))
    def call(self, inputs, training=None):
        return self.res_blocks(inputs, training=training)
######################################
class ResNet(keras.Model):
def __init__(self, layer_dims, num_classes=16): # [2, 2, 2, 2]
super(ResNet, self).__init__()
self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
layers.BatchNormalization(),
layers.Activation('relu'),
layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
])
self.layer1 = self.build_resblock(64, layer_dims[0])
self.layer2 = self.build_resblock(128, layer_dims[1], stride=1)
self.layer3 = self.build_resblock(256, layer_dims[2], stride=1)
self.layer4 = self.build_resblock(512, layer_dims[3], stride=1)
# output: [b, 512, h, w],
self.avgpool = layers.GlobalAveragePooling2D()
self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
x = self.stem(inputs)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def resnet18():
return ResNet([2, 2, 2, 2],num_classes=9)
def resnet34():
return ResNet([3, 4, 6, 3],num_classes=9)
########################### pp2 main model ########################################
class pp2_model(keras.Model):
def __init__(self,filters_num,layer_dims,num_classes,dropout_rate):
super(pp2_model, self).__init__()
self.conv1 = layers.Conv3D(filters_num[0],kernel_size=(3,3,7),padding='same') # filters_num = 8
self.bn1 = layers.BatchNormalization()
self.relu1 = layers.Activation('relu')
self.conv2 = layers.Conv3D(filters_num[1],kernel_size=(3,3,5),padding='same') # filters_num = 16
self.bn2 = layers.BatchNormalization()
self.relu2 = layers.Activation('relu')
self.conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3), padding='same') # filters_num = 32
self.bn3 = layers.BatchNormalization()
self.relu3 = layers.Activation('relu')
# self.reshape = layers.Reshape()
self.conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same') # filters_num = 64
self.bn4 = layers.BatchNormalization()
self.relu4 = layers.Activation('relu')
self.conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same') # filters_num = **
self.bn5 = layers.BatchNormalization()
self.relu5 = layers.Activation('relu')
self.dpout = layers.Dropout(dropout_rate)
self.layer1 = self.build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
# output: [b, 512, h, w],
# self.fc1 = layers.Flatten()
self.avgpool = layers.GlobalAveragePooling2D()
self.fc2 = layers.Dense(filters_num[7],activation='relu')
self.fc3 = layers.Dense(filters_num[6],activation='relu')
self.fc4 = layers.Dense(num_classes)
def call(self,inputs,training=None):
out = self.conv1(inputs)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu3(out)
# reshape
out = layers.Reshape((out.shape[1],out.shape[2],out.shape[3] * out.shape[4]))(out)
out = self.conv4(out)
out = self.bn4(out)
out = self.relu4(out)
out = self.dpout(out)
out = self.conv5(out)
out = self.bn5(out)
out = self.dpout(out)
out = self.relu5(out)
x = self.layer1(out)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# [b, c]
x = self.avgpool(x)
# [b, 100]
x = self.fc2(x)
x = self.dpout(x)
x = self.fc3(x)
x = self.fc4(x)
return x
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
class ResNet_block(keras.Model):
def __init__(self, layer_dims,filters_num): # [2, 2, 2, 2]
super(ResNet_block, self).__init__()
#
# self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)),
# layers.BatchNormalization(),
# layers.Activation('relu'),
# layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same')
# ])
self.layer1 = self.build_resblock(filters_num[0], layer_dims[0]) # filters_num = 64
self.layer2 = self.build_resblock(filters_num[1], layer_dims[1], stride=1) # filters_num = 128
self.layer3 = self.build_resblock(filters_num[2], layer_dims[2], stride=1) # filters_num = 256
self.layer4 = self.build_resblock(filters_num[3], layer_dims[3], stride=1) # filters_num = 512
# output: [b, 512, h, w],
# self.avgpool = layers.GlobalAveragePooling2D()
# self.fc = layers.Dense(num_classes)
def call(self, inputs, training=None):
# x = self.stem(inputs)
x1 = self.layer1(inputs)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
# [b, c]
# x = self.avgpool(x)
# [b, 100]
# x = self.fc(x)
return x2,x4
def build_resblock(self, filter_num, blocks, stride=1):
res_blocks = Sequential()
# may down sample
res_blocks.add(BasicBlock(filter_num, stride))
for _ in range(1, blocks):
res_blocks.add(BasicBlock(filter_num, stride=1))
return res_blocks
def network_up(input_layer_up,filters_num,dropout_rate,Block_res):
# input_layer = Input(input_shape)
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 7), padding='same')(input_layer) # filters_num = 8
# conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3),padding='same',kernel_initializer='he_normal',kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # filters_num = 8
conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3), padding='same',
kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) #kernel_initializer='he_normal',
# conv_layer1m = tf.keras.layers.MaxPooling3D(pool_size=(1, 1, 1),padding='same')(conv1)
# conv_layer1g = tf.keras.layers.GlobalMaxPooling3D()(conv1)
conv1_bn = layers.BatchNormalization()(conv1)
conv1_relu = layers.Activation('relu')(conv1_bn)
# conv1_relu = Dropout(0.5)(conv1_relu)
# conv1_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv1_relu)
# conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv1_relu) # filters_num = 16
conv2_bn = layers.BatchNormalization()(conv2)
conv2_relu = layers.Activation('relu')(conv2_bn)
# conv2_relu = Dropout(0.5)(conv2_relu)
# conv2_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv2_relu)
conv3 = layers.Conv3D(filters_num[2], kernel_size=(3, 3, 3),padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv2_relu) # filters_num = 32
conv3_bn = layers.BatchNormalization()(conv3)
conv3_relu = layers.Activation('relu')(conv3_bn)
# conv3_relu = Dropout(0.5)(conv3_relu)
# conv3_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv3_relu)
conv3_relu_reshape = layers.Reshape((conv3_relu.shape[1],conv3_relu.shape[2],conv3_relu.shape[3]*conv3_relu.shape[4]))(conv3_relu)
    conv3_relu_reshape = Dropout(0.5)(conv3_relu_reshape)  # note: fixed rate 0.5 here; the dropout_rate argument is not used
    ################## second scale branch #########################
# conv11 = layers.Conv3D(filters_num[0], kernel_size=(5, 5, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv11_bn = layers.BatchNormalization()(conv11)
# conv11_relu = layers.Activation('relu')(conv11_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv22 = layers.Conv3D(filters_num[1], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv11_relu) # filters_num = 16
# conv22_bn = layers.BatchNormalization()(conv22)
# conv22_relu = layers.Activation('relu')(conv22_bn)
#
# conv33 = layers.Conv3D(filters_num[2], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv22_relu) # filters_num = 32
# conv33_bn = layers.BatchNormalization()(conv33)
# conv33_relu = layers.Activation('relu')(conv33_bn)
#
# conv33_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv33_relu)
####################################################
# conv111 = layers.Conv3D(filters_num[0], kernel_size=(7, 7, 3), padding='same',
# kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up)
# conv111_bn = layers.BatchNormalization()(conv111)
# conv111_relu = layers.Activation('relu')(conv111_bn)
#
# # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16
# conv222 = layers.Conv3D(filters_num[1], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv111_relu) # filters_num = 16
# conv222_bn = layers.BatchNormalization()(conv222)
# conv222_relu = layers.Activation('relu')(conv222_bn)
#
# conv333 = layers.Conv3D(filters_num[2], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal',
# kernel_regularizer=regularizers.l2(0.0001))(conv222_relu) # filters_num = 32
# conv333_bn = layers.BatchNormalization()(conv333)
# conv333_relu = layers.Activation('relu')(conv333_bn)
#
# conv333_relu_reshape = layers.Reshape(
# (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv333_relu)
#################concatenate########################
# conv33333_relu_reshape = Concatenate(axis=-1)([conv3_relu_reshape, conv33_relu_reshape])
#########################################
conv4 = layers.Conv2D(filters_num[3], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv3_relu_reshape) # filters_num = 64
conv4_bn = layers.BatchNormalization()(conv4)
conv4_relu = layers.Activation('relu')(conv4_bn)
# conv4_relu = Dropout(0.5)(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv4_relu)
# conv4_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv4_relu)
conv5 = layers.Conv2D(filters_num[4], kernel_size=(3, 3), padding='same',kernel_regularizer=regularizers.l2(0.0001))(conv4_relu) # filters_num = **
conv5_bn = layers.BatchNormalization()(conv5)
conv5_relu = layers.Activation('relu')(conv5_bn)
# conv5_relu = Dropout(0.5)(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv5_relu)
# conv5_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv5_relu)
# conv5_dpout = layers.Dropout(dropout_rate)(conv5)
# conv5_reshape = layers.Reshape((conv5_dpout.shape[1],conv5_dpout.shape[2],conv5_dpout.shape[3]))(conv5_dpout)
outputs2,outputs4 = Block_res(conv5_relu)
return conv5,outputs2,outputs4
# layer1 = build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64
# layer2 = build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128
# layer3 = build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256
# layer4 = build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512
| en | 0.293134 | # from tensorflow.keras import * # 定义一个3x3卷积!kernel_initializer='he_normal','glorot_normal' ############################### 通道注意力机制 ############################### # shape (None, 1, 1 feature) # shape (None, 1, 1 feature) ############################### 空间注意力机制 ############################### # 创建一个维度,拼接到一起concat。 # self.conv1 = layers.Conv2D(filter_num, (3, 3), strides=stride, padding='same', kernel_initializer='he_normal',kernel_regularizer=keras.regularizers.l2(5e-4)) #kernel_initializer='he_normal', ############################### 注意力机制 ############################### # [b, h, w, c] ############################### 注意力机制 ############################### ###################################### # may down sample ###################################### # [2, 2, 2, 2] # output: [b, 512, h, w], # [b, c] # [b, 100] # may down sample ########################### pp2主模型 ######################################## # filters_num = 8 # filters_num = 16 # filters_num = 32 # self.reshape = layers.Reshape() # filters_num = 64 # filters_num = ** # filters_num = 64 # filters_num = 128 # filters_num = 256 # filters_num = 512 # output: [b, 512, h, w], # self.fc1 = layers.Flatten() # reshape # [b, c] # [b, 100] # may down sample # [2, 2, 2, 2] # # self.stem = Sequential([layers.Conv2D(64, (3, 3), strides=(1, 1)), # layers.BatchNormalization(), # layers.Activation('relu'), # layers.MaxPool2D(pool_size=(2, 2), strides=(1, 1), padding='same') # ]) # filters_num = 64 # filters_num = 128 # filters_num = 256 # filters_num = 512 # output: [b, 512, h, w], # self.avgpool = layers.GlobalAveragePooling2D() # self.fc = layers.Dense(num_classes) # x = self.stem(inputs) # [b, c] # x = self.avgpool(x) # [b, 100] # x = self.fc(x) # may down sample # input_layer = Input(input_shape) # conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 7), padding='same')(input_layer) # filters_num = 8 # conv1 = layers.Conv3D(filters_num[0], kernel_size=(3, 3, 3),padding='same',kernel_initializer='he_normal',kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # filters_num = 8 #kernel_initializer='he_normal', # conv_layer1m = tf.keras.layers.MaxPooling3D(pool_size=(1, 1, 1),padding='same')(conv1) # conv_layer1g = tf.keras.layers.GlobalMaxPooling3D()(conv1) # conv1_relu = Dropout(0.5)(conv1_relu) # conv1_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv1_relu) # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 # filters_num = 16 # conv2_relu = Dropout(0.5)(conv2_relu) # conv2_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv2_relu) # filters_num = 32 # conv3_relu = Dropout(0.5)(conv3_relu) # conv3_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv3_relu) ##################第二个尺度######################### # conv11 = layers.Conv3D(filters_num[0], kernel_size=(5, 5, 3), padding='same', # kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # conv11_bn = layers.BatchNormalization()(conv11) # conv11_relu = layers.Activation('relu')(conv11_bn) # # # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 # conv22 = layers.Conv3D(filters_num[1], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv11_relu) # filters_num = 16 # conv22_bn = layers.BatchNormalization()(conv22) 
# conv22_relu = layers.Activation('relu')(conv22_bn) # # conv33 = layers.Conv3D(filters_num[2], kernel_size=(5, 5, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv22_relu) # filters_num = 32 # conv33_bn = layers.BatchNormalization()(conv33) # conv33_relu = layers.Activation('relu')(conv33_bn) # # conv33_relu_reshape = layers.Reshape( # (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv33_relu) #################################################### # conv111 = layers.Conv3D(filters_num[0], kernel_size=(7, 7, 3), padding='same', # kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.0001))(input_layer_up) # conv111_bn = layers.BatchNormalization()(conv111) # conv111_relu = layers.Activation('relu')(conv111_bn) # # # conv2 = layers.Conv3D(filters_num[1], kernel_size=(3, 3, 5), padding='same')(conv1_relu) # filters_num = 16 # conv222 = layers.Conv3D(filters_num[1], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv111_relu) # filters_num = 16 # conv222_bn = layers.BatchNormalization()(conv222) # conv222_relu = layers.Activation('relu')(conv222_bn) # # conv333 = layers.Conv3D(filters_num[2], kernel_size=(7, 7, 3), padding='same', kernel_initializer='he_normal', # kernel_regularizer=regularizers.l2(0.0001))(conv222_relu) # filters_num = 32 # conv333_bn = layers.BatchNormalization()(conv333) # conv333_relu = layers.Activation('relu')(conv333_bn) # # conv333_relu_reshape = layers.Reshape( # (conv3_relu.shape[1], conv3_relu.shape[2], conv3_relu.shape[3] * conv3_relu.shape[4]))(conv333_relu) #################concatenate######################## # conv33333_relu_reshape = Concatenate(axis=-1)([conv3_relu_reshape, conv33_relu_reshape]) ######################################### # filters_num = 64 # conv4_relu = Dropout(0.5)(conv4_relu) # conv4_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv4_relu) # conv4_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv4_relu) # filters_num = ** # conv5_relu = Dropout(0.5)(conv5_relu) # conv5_relu = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(conv5_relu) # conv5_relu = tf.keras.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding='same')(conv5_relu) # conv5_dpout = layers.Dropout(dropout_rate)(conv5) # conv5_reshape = layers.Reshape((conv5_dpout.shape[1],conv5_dpout.shape[2],conv5_dpout.shape[3]))(conv5_dpout) # layer1 = build_resblock(filters_num[5], layer_dims[0]) # filters_num = 64 # layer2 = build_resblock(filters_num[6], layer_dims[1], stride=2) # filters_num = 128 # layer3 = build_resblock(filters_num[7], layer_dims[2], stride=2) # filters_num = 256 # layer4 = build_resblock(filters_num[8], layer_dims[3], stride=2) # filters_num = 512 | 2.724202 | 3 |
medi/inference/utils.py | yuan-xy/medi | 3 | 491 | <reponame>yuan-xy/medi<filename>medi/inference/utils.py
""" A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
import re
import os
from medi._compatibility import reraise
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def to_tuple(func):
def wrapper(*args, **kwargs):
return tuple(func(*args, **kwargs))
return wrapper
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
class UncaughtAttributeError(Exception):
"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
`hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
Therefore this class originally had to be derived from `BaseException`
instead of `Exception`. But because I removed relevant `hasattr` from
the code base, we can now switch back to `Exception`.
:param base: return values of sys.exc_info().
"""
def safe_property(func):
return property(reraise_uncaught(func))
def reraise_uncaught(func):
"""
Re-throw uncaught `AttributeError`.
    Usage: put ``@reraise_uncaught`` in front of a function
    which is **not** supposed to raise `AttributeError`.
    AttributeError is easily caught by `hasattr` and by another
    ``except AttributeError`` clause.  This becomes a problem when you use
    a lot of "dynamic" attributes (e.g., via ``@property``) because you
    can't distinguish whether the property really does not exist or whether
    some code inside the "dynamic" attribute raised that error.  In well
    written code such errors should not exist, but getting there is very
    difficult.  This decorator helps us get there by changing
    `AttributeError` to `UncaughtAttributeError` to avoid an unexpected catch.
    This helps us notice bugs earlier and facilitates debugging.
.. note:: Treating StopIteration here is easy.
Add that feature when needed.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except AttributeError:
exc_info = sys.exc_info()
reraise(UncaughtAttributeError(exc_info[1]), exc_info[2])
return wrapper
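# Illustrative example (hypothetical class, not part of the original module):
# a buggy property decorated with safe_property surfaces as
# UncaughtAttributeError instead of an AttributeError that hasattr()/getattr()
# fallbacks would silently swallow.
def _safe_property_example():
    class Node(object):
        @safe_property
        def parent(self):
            return self.missing_attribute  # bug: attribute does not exist
    try:
        Node().parent
    except UncaughtAttributeError:
        return True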
class PushBackIterator(object):
def __init__(self, iterator):
self.pushes = []
self.iterator = iterator
self.current = None
def push_back(self, value):
self.pushes.append(value)
def __iter__(self):
return self
def next(self):
""" Python 2 Compatibility """
return self.__next__()
def __next__(self):
if self.pushes:
self.current = self.pushes.pop()
else:
self.current = next(self.iterator)
return self.current
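# Illustrative example (not part of the original module): peek at the next
# value and push it back so later consumers still see it.
def _push_back_iterator_example():
    it = PushBackIterator(iter([1, 2, 3]))
    first = next(it)     # -> 1
    it.push_back(first)  # un-consume it
    return list(it)      # -> [1, 2, 3]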
@contextlib.contextmanager
def ignored(*exceptions):
"""
    Context manager that ignores all of the specified exceptions. This is
    in the standard library starting with Python 3.4 (``contextlib.suppress``).
"""
try:
yield
except exceptions:
pass
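# Illustrative example (not part of the original module): `ignored` behaves
# like contextlib.suppress -- the KeyError raised inside the block is discarded.
def _ignored_example():
    data = {}
    with ignored(KeyError):
        del data['missing']
    return data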
| """ A universal module with functions / classes without dependencies. """
import sys
import contextlib
import functools
import re
import os
from medi._compatibility import reraise
_sep = os.path.sep
if os.path.altsep is not None:
_sep += os.path.altsep
_path_re = re.compile(r'(?:\.[^{0}]+|[{0}]__init__\.py)$'.format(re.escape(_sep)))
del _sep
def to_list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def to_tuple(func):
def wrapper(*args, **kwargs):
return tuple(func(*args, **kwargs))
return wrapper
def unite(iterable):
"""Turns a two dimensional array into a one dimensional."""
return set(typ for types in iterable for typ in types)
class UncaughtAttributeError(Exception):
"""
Important, because `__getattr__` and `hasattr` catch AttributeErrors
implicitly. This is really evil (mainly because of `__getattr__`).
`hasattr` in Python 2 is even more evil, because it catches ALL exceptions.
Therefore this class originally had to be derived from `BaseException`
instead of `Exception`. But because I removed relevant `hasattr` from
the code base, we can now switch back to `Exception`.
:param base: return values of sys.exc_info().
"""
def safe_property(func):
return property(reraise_uncaught(func))
def reraise_uncaught(func):
"""
Re-throw uncaught `AttributeError`.
    Usage: put ``@reraise_uncaught`` in front of a function
    which is **not** supposed to raise `AttributeError`.
    AttributeError is easily caught by `hasattr` and by another
    ``except AttributeError`` clause.  This becomes a problem when you use
    a lot of "dynamic" attributes (e.g., via ``@property``) because you
    can't distinguish whether the property really does not exist or whether
    some code inside the "dynamic" attribute raised that error.  In well
    written code such errors should not exist, but getting there is very
    difficult.  This decorator helps us get there by changing
    `AttributeError` to `UncaughtAttributeError` to avoid an unexpected catch.
    This helps us notice bugs earlier and facilitates debugging.
.. note:: Treating StopIteration here is easy.
Add that feature when needed.
"""
@functools.wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except AttributeError:
exc_info = sys.exc_info()
reraise(UncaughtAttributeError(exc_info[1]), exc_info[2])
return wrapper
class PushBackIterator(object):
def __init__(self, iterator):
self.pushes = []
self.iterator = iterator
self.current = None
def push_back(self, value):
self.pushes.append(value)
def __iter__(self):
return self
def next(self):
""" Python 2 Compatibility """
return self.__next__()
def __next__(self):
if self.pushes:
self.current = self.pushes.pop()
else:
self.current = next(self.iterator)
return self.current
@contextlib.contextmanager
def ignored(*exceptions):
"""
    Context manager that ignores all of the specified exceptions. This is
    in the standard library starting with Python 3.4 (``contextlib.suppress``).
"""
try:
yield
except exceptions:
pass | en | 0.851952 | A universal module with functions / classes without dependencies. Turns a two dimensional array into a one dimensional. Important, because `__getattr__` and `hasattr` catch AttributeErrors implicitly. This is really evil (mainly because of `__getattr__`). `hasattr` in Python 2 is even more evil, because it catches ALL exceptions. Therefore this class originally had to be derived from `BaseException` instead of `Exception`. But because I removed relevant `hasattr` from the code base, we can now switch back to `Exception`. :param base: return values of sys.exc_info(). Re-throw uncaught `AttributeError`. Usage: Put ``@rethrow_uncaught`` in front of the function which does **not** suppose to raise `AttributeError`. AttributeError is easily get caught by `hasattr` and another ``except AttributeError`` clause. This becomes problem when you use a lot of "dynamic" attributes (e.g., using ``@property``) because you can't distinguish if the property does not exist for real or some code inside of the "dynamic" attribute through that error. In a well written code, such error should not exist but getting there is very difficult. This decorator is to help us getting there by changing `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. This helps us noticing bugs earlier and facilitates debugging. .. note:: Treating StopIteration here is easy. Add that feature when needed. Python 2 Compatibility Value manager that ignores all of the specified exceptions. This will be in the standard library starting with Python 3.5. | 2.729981 | 3 |
run.py | Bioconductor/bioc_git_transition | 16 | 492 | """Bioconductor run git transition code.
This module assembles the classes for the SVN --> Git transition
can be run in a sequential manner.
It runs the following aspects fo the Bioconductor transition.
Note: Update the SVN dump
1. Run Bioconductor Software package transition
2. Run Bioconductor Experiment Data package transition
3. Run Workflow package transition
4. Run Manifest file transition
5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on
software packages
Manual tasks which need to be done:
1. Copy over bare repos to repositories/packages
2. Copy manifest bare git repo to repositories/admin
"""
import src.run_transition as rt
import src.svn_dump_update as sdu
import logging
import time
logging.basicConfig(filename='transition.log',
format='%(levelname)s %(asctime)s %(message)s',
level=logging.DEBUG)
def svn_dump_update(config_file):
sdu.svn_root_update(config_file)
sdu.svn_experiment_root_update(config_file)
return
def run(config_file):
rt.run_software_transition(config_file, new_svn_dump=True)
rt.run_experiment_data_transition(config_file, new_svn_dump=True)
rt.run_workflow_transition(config_file, new_svn_dump=True)
rt.run_manifest_transition(config_file, new_svn_dump=True)
return
if __name__ == '__main__':
start_time = time.time()
config_file = "./settings.ini"
svn_dump_update(config_file)
run(config_file)
# TODO: Run updates after dump update
svn_dump_update(config_file)
rt.run_updates(config_file)
logging.info("--- %s seconds ---" % (time.time() - start_time))
| """Bioconductor run git transition code.
This module assembles the classes for the SVN --> Git transition
can be run in a sequential manner.
It runs the following aspects fo the Bioconductor transition.
Note: Update the SVN dump
1. Run Bioconductor Software package transition
2. Run Bioconductor Experiment Data package transition
3. Run Workflow package transition
4. Run Manifest file transition
5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on
software packages
Manual tasks which need to be done:
1. Copy over bare repos to repositories/packages
2. Copy manifest bare git repo to repositories/admin
"""
import src.run_transition as rt
import src.svn_dump_update as sdu
import logging
import time
logging.basicConfig(filename='transition.log',
format='%(levelname)s %(asctime)s %(message)s',
level=logging.DEBUG)
def svn_dump_update(config_file):
sdu.svn_root_update(config_file)
sdu.svn_experiment_root_update(config_file)
return
def run(config_file):
rt.run_software_transition(config_file, new_svn_dump=True)
rt.run_experiment_data_transition(config_file, new_svn_dump=True)
rt.run_workflow_transition(config_file, new_svn_dump=True)
rt.run_manifest_transition(config_file, new_svn_dump=True)
return
if __name__ == '__main__':
start_time = time.time()
config_file = "./settings.ini"
svn_dump_update(config_file)
run(config_file)
# TODO: Run updates after dump update
svn_dump_update(config_file)
rt.run_updates(config_file)
logging.info("--- %s seconds ---" % (time.time() - start_time))
| en | 0.630707 | Bioconductor run git transition code. This module assembles the classes for the SVN --> Git transition can be run in a sequential manner. It runs the following aspects fo the Bioconductor transition. Note: Update the SVN dump 1. Run Bioconductor Software package transition 2. Run Bioconductor Experiment Data package transition 3. Run Workflow package transition 4. Run Manifest file transition 5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on software packages Manual tasks which need to be done: 1. Copy over bare repos to repositories/packages 2. Copy manifest bare git repo to repositories/admin # TODO: Run updates after dump update | 2.58853 | 3 |
third_party/google-endpoints/dogpile/cache/region.py | tingshao/catapult | 2,151 | 493 | <gh_stars>1000+
from __future__ import with_statement
from .. import Lock, NeedRegenerationException
from ..util import NameRegistry
from . import exception
from ..util import PluginLoader, memoized_property, coerce_string_conf
from .util import function_key_generator, function_multi_key_generator
from .api import NO_VALUE, CachedValue
from .proxy import ProxyBackend
from ..util import compat
import time
import datetime
from numbers import Number
from functools import wraps
import threading
_backend_loader = PluginLoader("dogpile.cache")
register_backend = _backend_loader.register
from . import backends # noqa
value_version = 1
"""An integer placed in the :class:`.CachedValue`
so that new versions of dogpile.cache can detect cached
values from a previous, backwards-incompatible version.
"""
class RegionInvalidationStrategy(object):
"""Region invalidation strategy interface
Implement this interface and pass implementation instance
to :meth:`.CacheRegion.configure` to override default region invalidation.
Example::
class CustomInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._soft_invalidated = None
self._hard_invalidated = None
def invalidate(self, hard=None):
if hard:
self._soft_invalidated = None
self._hard_invalidated = time.time()
else:
self._soft_invalidated = time.time()
self._hard_invalidated = None
def is_invalidated(self, timestamp):
return ((self._soft_invalidated and
timestamp < self._soft_invalidated) or
(self._hard_invalidated and
timestamp < self._hard_invalidated))
def was_hard_invalidated(self):
return bool(self._hard_invalidated)
def is_hard_invalidated(self, timestamp):
return (self._hard_invalidated and
timestamp < self._hard_invalidated)
def was_soft_invalidated(self):
return bool(self._soft_invalidated)
def is_soft_invalidated(self, timestamp):
return (self._soft_invalidated and
timestamp < self._soft_invalidated)
The custom implementation is injected into a :class:`.CacheRegion`
at configure time using the
:paramref:`.CacheRegion.configure.region_invalidator` parameter::
region = CacheRegion()
region = region.configure(region_invalidator=CustomInvalidationStrategy())
Invalidation strategies that wish to have access to the
:class:`.CacheRegion` itself should construct the invalidator given the
region as an argument::
class MyInvalidator(RegionInvalidationStrategy):
def __init__(self, region):
self.region = region
# ...
# ...
region = CacheRegion()
region = region.configure(region_invalidator=MyInvalidator(region))
.. versionadded:: 0.6.2
.. seealso::
:paramref:`.CacheRegion.configure.region_invalidator`
"""
def invalidate(self, hard=True):
"""Region invalidation.
:class:`.CacheRegion` propagated call.
The default invalidation system works by setting
a current timestamp (using ``time.time()``) to consider all older
timestamps effectively invalidated.
"""
raise NotImplementedError()
def is_hard_invalidated(self, timestamp):
"""Check timestamp to determine if it was hard invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in hard mode.
"""
raise NotImplementedError()
def is_soft_invalidated(self, timestamp):
"""Check timestamp to determine if it was soft invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in soft mode.
"""
raise NotImplementedError()
def is_invalidated(self, timestamp):
"""Check timestamp to determine if it was invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time.
"""
raise NotImplementedError()
def was_soft_invalidated(self):
"""Indicate the region was invalidated in soft mode.
:return: Boolean. True if region was invalidated in soft mode.
"""
raise NotImplementedError()
def was_hard_invalidated(self):
"""Indicate the region was invalidated in hard mode.
:return: Boolean. True if region was invalidated in hard mode.
"""
raise NotImplementedError()
class DefaultInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._is_hard_invalidated = None
self._invalidated = None
def invalidate(self, hard=True):
self._is_hard_invalidated = bool(hard)
self._invalidated = time.time()
def is_invalidated(self, timestamp):
return (self._invalidated is not None and
timestamp < self._invalidated)
def was_hard_invalidated(self):
return self._is_hard_invalidated is True
def is_hard_invalidated(self, timestamp):
return self.was_hard_invalidated() and self.is_invalidated(timestamp)
def was_soft_invalidated(self):
return self._is_hard_invalidated is False
def is_soft_invalidated(self, timestamp):
return self.was_soft_invalidated() and self.is_invalidated(timestamp)
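# Usage sketch (illustrative only; the backend name and arguments below are
# assumptions, not requirements): an invalidation strategy instance is injected
# at configure time and then driven through CacheRegion.invalidate().
#
#     from dogpile.cache import make_region
#
#     region = make_region().configure(
#         "dogpile.cache.memory",
#         expiration_time=300,
#         region_invalidator=DefaultInvalidationStrategy(),
#     )
#     region.invalidate(hard=False)   # soft: stale values are served until regenerated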
class CacheRegion(object):
"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
This isn't used internally
but can be accessed via the ``.name`` parameter, helpful
for configuring a region from a config file.
:param function_key_generator: Optional. A
function that will produce a "cache key" given
a data creation function and arguments, when using
the :meth:`.CacheRegion.cache_on_arguments` method.
The structure of this function
should be two levels: given the data creation function,
return a new function that generates the key based on
the given arguments. Such as::
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
region = make_region(
function_key_generator = my_key_generator
).configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename":"file.dbm"
}
)
The ``namespace`` is that passed to
:meth:`.CacheRegion.cache_on_arguments`. It's not consulted
outside this function, so in fact can be of any form.
For example, it can be passed as a tuple, used to specify
arguments to pluck from \**kw::
def my_key_generator(namespace, fn):
def generate_key(*arg, **kw):
return ":".join(
[kw[k] for k in namespace] +
[str(x) for x in arg]
)
return generate_key
Where the decorator might be used as::
@my_region.cache_on_arguments(namespace=('x', 'y'))
def my_function(a, b, **kw):
return my_data()
.. seealso::
:func:`.function_key_generator` - default key generator
:func:`.kwarg_function_key_generator` - optional gen that also
uses keyword arguments
:param function_multi_key_generator: Optional.
Similar to ``function_key_generator`` parameter, but it's used in
:meth:`.CacheRegion.cache_multi_on_arguments`. Generated function
should return list of keys. For example::
def my_multi_key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_keys(*args):
return [namespace + ':' + str(a) for a in args]
return generate_keys
:param key_mangler: Function which will be used on all incoming
keys before passing to the backend. Defaults to ``None``,
in which case the key mangling function recommended by
the cache backend will be used. A typical mangler
is the SHA1 mangler found at :func:`.sha1_mangle_key`
which coerces keys into a SHA1
hash, so that the string length is fixed. To
disable all key mangling, set to ``False``. Another typical
mangler is the built-in Python function ``str``, which can be used
to convert non-string or Unicode keys to bytestrings, which is
needed when using a backend such as bsddb or dbm under Python 2.x
in conjunction with Unicode keys.
:param async_creation_runner: A callable that, when specified,
will be passed to and called by dogpile.lock when
there is a stale value present in the cache. It will be passed the
      mutex and is responsible for releasing that mutex when finished.
This can be used to defer the computation of expensive creator
functions to later points in the future by way of, for example, a
background thread, a long-running queue, or a task manager system
like Celery.
For a specific example using async_creation_runner, new values can
be created in a background thread like so::
import threading
def async_creation_runner(cache, somekey, creator, mutex):
''' Used by dogpile.core:Lock when appropriate '''
def runner():
try:
value = creator()
cache.set(somekey, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
region = make_region(
async_creation_runner=async_creation_runner,
).configure(
'dogpile.cache.memcached',
expiration_time=5,
arguments={
'url': '127.0.0.1:11211',
'distributed_lock': True,
}
)
Remember that the first request for a key with no associated
value will always block; async_creator will not be invoked.
However, subsequent requests for cached-but-expired values will
still return promptly. They will be refreshed by whatever
asynchronous means the provided async_creation_runner callable
implements.
By default the async_creation_runner is disabled and is set
to ``None``.
.. versionadded:: 0.4.2 added the async_creation_runner
feature.
"""
def __init__(
self,
name=None,
function_key_generator=function_key_generator,
function_multi_key_generator=function_multi_key_generator,
key_mangler=None,
async_creation_runner=None,
):
"""Construct a new :class:`.CacheRegion`."""
self.name = name
self.function_key_generator = function_key_generator
self.function_multi_key_generator = function_multi_key_generator
self.key_mangler = self._user_defined_key_mangler = key_mangler
self.async_creation_runner = async_creation_runner
self.region_invalidator = DefaultInvalidationStrategy()
def configure(
self, backend,
expiration_time=None,
arguments=None,
_config_argument_dict=None,
_config_prefix=None,
wrap=None,
replace_existing_backend=False,
region_invalidator=None
):
"""Configure a :class:`.CacheRegion`.
The :class:`.CacheRegion` itself
is returned.
:param backend: Required. This is the name of the
:class:`.CacheBackend` to use, and is resolved by loading
the class from the ``dogpile.cache`` entrypoint.
:param expiration_time: Optional. The expiration time passed
to the dogpile system. May be passed as an integer number
of seconds, or as a ``datetime.timedelta`` value.
        .. versionadded:: 0.5.0
``expiration_time`` may be optionally passed as a
``datetime.timedelta`` value.
The :meth:`.CacheRegion.get_or_create`
method as well as the :meth:`.CacheRegion.cache_on_arguments`
decorator (though note: **not** the :meth:`.CacheRegion.get`
method) will call upon the value creation function after this
time period has passed since the last generation.
:param arguments: Optional. The structure here is passed
directly to the constructor of the :class:`.CacheBackend`
in use, though is typically a dictionary.
:param wrap: Optional. A list of :class:`.ProxyBackend`
classes and/or instances, each of which will be applied
in a chain to ultimately wrap the original backend,
so that custom functionality augmentation can be applied.
.. versionadded:: 0.5.0
.. seealso::
:ref:`changing_backend_behavior`
:param replace_existing_backend: if True, the existing cache backend
will be replaced. Without this flag, an exception is raised if
a backend is already configured.
.. versionadded:: 0.5.7
:param region_invalidator: Optional. Override default invalidation
strategy with custom implementation of
:class:`.RegionInvalidationStrategy`.
.. versionadded:: 0.6.2
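As a minimal sketch of typical usage (the backend name and arguments
below are illustrative; ``dogpile.cache.memory`` is one of the bundled
backends)::
    from dogpile.cache import make_region
    region = make_region().configure(
        'dogpile.cache.memory',
        expiration_time=3600,  # seconds; a datetime.timedelta also works
    )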
"""
if "backend" in self.__dict__ and not replace_existing_backend:
raise exception.RegionAlreadyConfigured(
"This region is already "
"configured with backend: %s. "
"Specify replace_existing_backend=True to replace."
% self.backend)
backend_cls = _backend_loader.load(backend)
if _config_argument_dict:
self.backend = backend_cls.from_config_dict(
_config_argument_dict,
_config_prefix
)
else:
self.backend = backend_cls(arguments or {})
if not expiration_time or isinstance(expiration_time, Number):
self.expiration_time = expiration_time
elif isinstance(expiration_time, datetime.timedelta):
self.expiration_time = int(
compat.timedelta_total_seconds(expiration_time))
else:
raise exception.ValidationError(
'expiration_time is not a number or timedelta.')
if not self._user_defined_key_mangler:
self.key_mangler = self.backend.key_mangler
self._lock_registry = NameRegistry(self._create_mutex)
if getattr(wrap, '__iter__', False):
for wrapper in reversed(wrap):
self.wrap(wrapper)
if region_invalidator:
self.region_invalidator = region_invalidator
return self
def wrap(self, proxy):
''' Takes a ProxyBackend instance or class and wraps the
attached backend. '''
# if we were passed a type rather than an instance then
# initialize it.
if type(proxy) == type:
proxy = proxy()
if not issubclass(type(proxy), ProxyBackend):
raise TypeError("Type %s is not a valid ProxyBackend"
% type(proxy))
self.backend = proxy.wrap(self.backend)
def _mutex(self, key):
return self._lock_registry.get(key)
class _LockWrapper(object):
"""weakref-capable wrapper for threading.Lock"""
def __init__(self):
self.lock = threading.Lock()
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def _create_mutex(self, key):
mutex = self.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper()
def invalidate(self, hard=True):
"""Invalidate this :class:`.CacheRegion`.
The default invalidation system works by setting
a current timestamp (using ``time.time()``)
representing the "minimum creation time" for
a value. Any retrieved value whose creation
time is prior to this timestamp
is considered to be stale. It does not
affect the data in the cache in any way, and is also
local to this instance of :class:`.CacheRegion`.
Once set, the invalidation time is honored by
the :meth:`.CacheRegion.get_or_create`,
:meth:`.CacheRegion.get_or_create_multi` and
:meth:`.CacheRegion.get` methods.
The method supports both "hard" and "soft" invalidation
options. With "hard" invalidation,
:meth:`.CacheRegion.get_or_create` will force an immediate
regeneration of the value which all getters will wait for.
With "soft" invalidation, subsequent getters will return the
"old" value until the new one is available.
Usage of "soft" invalidation requires that the region or the method
is given a non-None expiration time.
.. versionadded:: 0.3.0
:param hard: if True, cache values will all require immediate
regeneration; dogpile logic won't be used. If False, the
creation time of existing values will be pushed back before
the expiration time so that a return+regen will be invoked.
.. versionadded:: 0.5.1
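For example, assuming ``region`` is an already-configured region (soft
invalidation additionally requires a non-None expiration time)::
    region.invalidate()            # hard: getters block while values regenerate
    region.invalidate(hard=False)  # soft: stale values served until refreshed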
"""
self.region_invalidator.invalidate(hard)
def configure_from_config(self, config_dict, prefix):
"""Configure from a configuration dictionary
and a prefix.
Example::
local_region = make_region()
memcached_region = make_region()
# regions are ready to use for function
# decorators, but not yet for actual caching
# later, when config is available
myconfig = {
"cache.local.backend":"dogpile.cache.dbm",
"cache.local.arguments.filename":"/path/to/dbmfile.dbm",
"cache.memcached.backend":"dogpile.cache.pylibmc",
"cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
}
local_region.configure_from_config(myconfig, "cache.local.")
memcached_region.configure_from_config(myconfig,
"cache.memcached.")
"""
config_dict = coerce_string_conf(config_dict)
return self.configure(
config_dict["%sbackend" % prefix],
expiration_time=config_dict.get(
"%sexpiration_time" % prefix, None),
_config_argument_dict=config_dict,
_config_prefix="%sarguments." % prefix,
wrap=config_dict.get(
"%swrap" % prefix, None),
)
@memoized_property
def backend(self):
raise exception.RegionNotConfigured(
"No backend is configured on this region.")
@property
def is_configured(self):
"""Return True if the backend has been configured via the
:meth:`.CacheRegion.configure` method already.
.. versionadded:: 0.5.1
"""
return 'backend' in self.__dict__
def get(self, key, expiration_time=None, ignore_expiration=False):
"""Return a value from the cache, based on the given key.
If the value is not present, the method returns the token
``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionchanged:: 0.3.0
:meth:`.CacheRegion.get` now checks the value's creation time
against the expiration time, rather than returning
the value unconditionally.
The method also interprets the cached value in terms
of the current "invalidation" time as set by
the :meth:`.invalidate` method. If a value is present,
but its creation time is older than the current
invalidation time, the ``NO_VALUE`` token is returned.
Passing the flag ``ignore_expiration=True`` bypasses
the invalidation time check.
.. versionadded:: 0.3.0
Support for the :meth:`.CacheRegion.invalidate`
method.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
after being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param expiration_time: Optional expiration time value
which will supersede that configured on the :class:`.CacheRegion`
itself.
.. versionadded:: 0.3.0
:param ignore_expiration: if ``True``, the value is returned
from the cache if present, regardless of configured
expiration times or whether or not :meth:`.invalidate`
was called.
.. versionadded:: 0.3.0
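A short sketch of typical usage, assuming a configured ``region``
(``compute_it`` is a hypothetical creation function); note the identity
check against ``NO_VALUE`` rather than a truthiness test::
    from dogpile.cache.api import NO_VALUE
    value = region.get("some key")
    if value is NO_VALUE:
        value = compute_it()
        region.set("some key", value)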
"""
if self.key_mangler:
key = self.key_mangler(key)
value = self.backend.get(key)
value = self._unexpired_value_fn(
expiration_time, ignore_expiration)(value)
return value.payload
def _unexpired_value_fn(self, expiration_time, ignore_expiration):
if ignore_expiration:
return lambda value: value
else:
if expiration_time is None:
expiration_time = self.expiration_time
current_time = time.time()
def value_fn(value):
if value is NO_VALUE:
return value
elif expiration_time is not None and \
current_time - value.metadata["ct"] > expiration_time:
return NO_VALUE
elif self.region_invalidator.is_invalidated(
value.metadata["ct"]):
return NO_VALUE
else:
return value
return value_fn
def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
"""Return multiple values from the cache, based on the given keys.
Returns values as a list matching the keys given.
E.g.::
values = region.get_multi(["one", "two", "three"])
To convert values to a dictionary, use ``zip()``::
keys = ["one", "two", "three"]
values = region.get_multi(keys)
dictionary = dict(zip(keys, values))
Keys which aren't present in the list are returned as
the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionadded:: 0.5.0
"""
if not keys:
return []
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
backend_values = self.backend.get_multi(keys)
_unexpired_value_fn = self._unexpired_value_fn(
expiration_time, ignore_expiration)
return [
value.payload if value is not NO_VALUE else value
for value in
(
_unexpired_value_fn(value) for value in
backend_values
)
]
def get_or_create(
self, key, creator, expiration_time=None, should_cache_fn=None):
"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
based on its creation time, the given
creation function may or may not be used to recreate the value
and persist the newly generated value in the cache.
Whether or not the function is used depends on whether the
*dogpile lock* can be acquired. If it can't, it means
a different thread or process is already running a creation
function for this key against the cache. When the dogpile
lock cannot be acquired, the method will block if no
previous value is available, until the lock is released and
a new value is available. If a previous value
is available, that value is returned immediately without blocking.
If the :meth:`.invalidate` method has been called, and
the retrieved value's timestamp is older than the invalidation
timestamp, the value is unconditionally prevented from
being returned. The method will attempt to acquire the dogpile
lock to generate a new value, or will wait
until the lock is released to return the new value.
.. versionchanged:: 0.3.0
The value is unconditionally regenerated if the creation
time is older than the last call to :meth:`.invalidate`.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
after being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param creator: function which creates a new value.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
the value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
E.g.::
def dont_cache_none(value):
return value is not None
value = region.get_or_create("some key",
create_value,
should_cache_fn=dont_cache_none)
Above, the function returns the value of create_value() if
the cache is invalid, however if the return value is None,
it won't be cached.
.. versionadded:: 0.4.3
.. seealso::
:meth:`.CacheRegion.cache_on_arguments` - applies
:meth:`.get_or_create` to any function using a decorator.
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value
version
"""
orig_key = key
if self.key_mangler:
key = self.key_mangler(key)
def get_value():
value = self.backend.get(key)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
raise NeedRegenerationException()
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
created_value = creator()
value = self._value(created_value)
if not should_cache_fn or \
should_cache_fn(created_value):
self.backend.set(key, value)
return value.payload, value.metadata["ct"]
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
if self.async_creation_runner:
def async_creator(mutex):
return self.async_creation_runner(
self, orig_key, creator, mutex)
else:
async_creator = None
with Lock(
self._mutex(key),
gen_value,
get_value,
expiration_time,
async_creator) as value:
return value
def get_or_create_multi(
self, keys, creator, expiration_time=None, should_cache_fn=None):
"""Return a sequence of cached values based on a sequence of keys.
The behavior for generation of values based on keys corresponds
to that of :meth:`.Region.get_or_create`, with the exception that
the ``creator()`` function may be asked to generate any subset of
the given keys. The list of keys to be generated is passed to
``creator()``, and ``creator()`` should return the generated values
as a sequence corresponding to the order of the keys.
The method uses the same approach as :meth:`.Region.get_multi`
and :meth:`.Region.set_multi` to get and set values from the
backend.
If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend`
that modifies values, take note this function invokes
``.set_multi()`` for newly generated values using the same values it
returns to the calling function. A correct implementation of
``.set_multi()`` will not modify values in-place on the submitted
``mapping`` dict.
:param keys: Sequence of keys to be retrieved.
:param creator: function which accepts a sequence of keys and
returns a sequence of new values.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
each value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
.. versionadded:: 0.5.0
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
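E.g., a minimal sketch, where ``load_users`` is a hypothetical bulk
loader that returns one value per key, in the order given::
    def load_users(*keys):
        # hypothetical query returning one row per key, in order
        return [database.fetch_user(k) for k in keys]
    users = region.get_or_create_multi(
        ["user:1", "user:2", "user:3"], load_users)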
"""
def get_value(key):
value = values.get(key, NO_VALUE)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
# dogpile.core understands a 0 here as
# "the value is not available", i.e.
# _has_value() will return False.
return value.payload, 0
else:
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
raise NotImplementedError()
def async_creator(key, mutex):
mutexes[key] = mutex
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
mutexes = {}
sorted_unique_keys = sorted(set(keys))
if self.key_mangler:
mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys]
else:
mangled_keys = sorted_unique_keys
orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
for orig_key, mangled_key in orig_to_mangled.items():
with Lock(
self._mutex(mangled_key),
gen_value,
lambda: get_value(mangled_key),
expiration_time,
async_creator=lambda mutex: async_creator(orig_key, mutex)
):
pass
try:
if mutexes:
# sort the keys; the idea is to prevent deadlocks,
# though one hasn't been reproduced in practice.
keys_to_get = sorted(mutexes)
new_values = creator(*keys_to_get)
values_w_created = dict(
(orig_to_mangled[k], self._value(v))
for k, v in zip(keys_to_get, new_values)
)
if not should_cache_fn:
self.backend.set_multi(values_w_created)
else:
self.backend.set_multi(dict(
(k, v)
for k, v in values_w_created.items()
if should_cache_fn(v[0])
))
values.update(values_w_created)
return [values[orig_to_mangled[k]].payload for k in keys]
finally:
for mutex in mutexes.values():
mutex.release()
def _value(self, value):
"""Return a :class:`.CachedValue` given a value."""
return CachedValue(
value,
{
"ct": time.time(),
"v": value_version
})
def set(self, key, value):
"""Place a new value in the cache under the given key."""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.set(key, self._value(value))
def set_multi(self, mapping):
"""Place new values in the cache under the given keys.
.. versionadded:: 0.5.0
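E.g., assuming a configured ``region``::
    region.set_multi({"k1": "value1", "k2": "value2"})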
"""
if not mapping:
return
if self.key_mangler:
mapping = dict((
self.key_mangler(k), self._value(v))
for k, v in mapping.items())
else:
mapping = dict((k, self._value(v)) for k, v in mapping.items())
self.backend.set_multi(mapping)
def delete(self, key):
"""Remove a value from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
"""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.delete(key)
def delete_multi(self, keys):
"""Remove multiple values from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
.. versionadded:: 0.5.0
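E.g., assuming a configured ``region``; keys not present in the cache
are ignored::
    region.delete_multi(["k1", "k2", "k3"])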
"""
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
self.backend.delete_multi(keys)
def cache_on_arguments(
self, namespace=None,
expiration_time=None,
should_cache_fn=None,
to_str=compat.string_type,
function_key_generator=None):
"""A function decorator that will cache the return
value of the function using a key derived from the
function itself and its arguments.
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
E.g.::
@someregion.cache_on_arguments()
def generate_something(x, y):
return somedatabase.query(x, y)
The decorated function can then be called normally, where
data will be pulled from the cache region unless a new
value is needed::
result = generate_something(5, 6)
The function is also given an attribute ``invalidate()``, which
provides for invalidation of the value. Pass to ``invalidate()``
the same arguments you'd pass to the function itself to represent
a particular value::
generate_something.invalidate(5, 6)
Another attribute ``set()`` is added to provide extra caching
possibilities relative to the function. This is a convenience
method for :meth:`.CacheRegion.set` which will store a given
value directly without calling the decorated function.
The value to be cached is passed as the first argument, and the
arguments which would normally be passed to the function
should follow::
generate_something.set(3, 5, 6)
The above example is equivalent to calling
``generate_something(5, 6)``, if the function were to produce
the value ``3`` as the value to be cached.
.. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
Similar to ``set()`` is ``refresh()``. This attribute will
invoke the decorated function and populate a new value into
the cache with the new value, as well as returning that value::
newvalue = generate_something.refresh(5, 6)
.. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
function.
Lastly, the ``get()`` method returns either the value cached
for the given key, or the token ``NO_VALUE`` if no such key
exists::
value = generate_something.get(5, 6)
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
The default key generation will use the name
of the function, the module name for the function,
the arguments passed, as well as an optional "namespace"
parameter in order to generate a cache key.
Given a function ``one`` inside the module
``myapp.tools``::
@region.cache_on_arguments(namespace="foo")
def one(a, b):
return a + b
Above, calling ``one(3, 4)`` will produce a
cache key as follows::
myapp.tools:one|foo|3 4
The key generator will ignore an initial argument
of ``self`` or ``cls``, making the decorator suitable
(with caveats) for use with instance or class methods.
Given the example::
class MyClass(object):
@region.cache_on_arguments(namespace="foo")
def one(self, a, b):
return a + b
The cache key above for ``MyClass().one(3, 4)`` will
again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
the name ``self`` is skipped.
The ``namespace`` parameter is optional, and is used
normally to disambiguate two functions of the same
name within the same module, as can occur when decorating
instance or class methods as below::
class MyClass(object):
@region.cache_on_arguments(namespace='MC')
def somemethod(self, x, y):
""
class MyOtherClass(object):
@region.cache_on_arguments(namespace='MOC')
def somemethod(self, x, y):
""
Above, the ``namespace`` parameter disambiguates
between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
Python class declaration mechanics otherwise prevent
the decorator from having awareness of the ``MyClass``
and ``MyOtherClass`` names, as the function is received
by the decorator before it becomes an instance method.
The function key generation can be entirely replaced
on a per-region basis using the ``function_key_generator``
argument present on :func:`.make_region` and
:class:`.CacheRegion`. It defaults to
:func:`.function_key_generator`.
:param namespace: optional string argument which will be
established as part of the cache key. This may be needed
to disambiguate functions of the same name within the same
source file, such as those
associated with classes - note that the decorator itself
can't see the parent class on a function as the class is
being declared.
:param expiration_time: if not None, will override the normal
expiration time.
May be specified as a callable, taking no arguments, that
returns a value to be used as the ``expiration_time``. This callable
will be called whenever the decorated function itself is called, in
caching or retrieving. Thus, this can be used to
determine a *dynamic* expiration time for the cached function
result. Example use cases include "cache the result until the
end of the day, week or time period" and "cache until a certain date
or time passes".
.. versionchanged:: 0.5.0
``expiration_time`` may be passed as a callable to
:meth:`.CacheRegion.cache_on_arguments`.
:param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
.. versionadded:: 0.4.3
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_key_generator: a function that will produce a
"cache key". This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_key_generator is None:
function_key_generator = self.function_key_generator
def decorator(fn):
if to_str is compat.string_type:
# backwards compatible
key_generator = function_key_generator(namespace, fn)
else:
key_generator = function_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
key = key_generator(*arg, **kw)
@wraps(fn)
def creator():
return fn(*arg, **kw)
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
return self.get_or_create(key, creator, timeout,
should_cache_fn)
def invalidate(*arg, **kw):
key = key_generator(*arg, **kw)
self.delete(key)
def set_(value, *arg, **kw):
key = key_generator(*arg, **kw)
self.set(key, value)
def get(*arg, **kw):
key = key_generator(*arg, **kw)
return self.get(key)
def refresh(*arg, **kw):
key = key_generator(*arg, **kw)
value = fn(*arg, **kw)
self.set(key, value)
return value
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
decorate.original = fn
return decorate
return decorator
def cache_multi_on_arguments(
self, namespace=None, expiration_time=None,
should_cache_fn=None,
asdict=False, to_str=compat.string_type,
function_multi_key_generator=None):
"""A function decorator that will cache multiple return
values from the function using a sequence of keys derived from the
function itself and the arguments passed to it.
This method is the "multiple key" analogue to the
:meth:`.CacheRegion.cache_on_arguments` method.
Example::
@someregion.cache_multi_on_arguments()
def generate_something(*keys):
return [
somedatabase.query(key)
for key in keys
]
The decorated function can be called normally. The decorator
will produce a list of cache keys using a mechanism similar to
that of :meth:`.CacheRegion.cache_on_arguments`, combining the
name of the function with the optional namespace and with the
string form of each key. It will then consult the cache using
the same mechanism as that of :meth:`.CacheRegion.get_multi`
to retrieve all current values; the originally passed keys
corresponding to values which are missing or need
regeneration will be assembled into a new argument list, and
the decorated function is then called with that subset of
arguments.
The returned result is a list::
result = generate_something("key1", "key2", "key3")
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create_multi` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
:meth:`.CacheRegion.cache_multi_on_arguments` works only with
a single function signature, one which takes a simple list of
keys as arguments.
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
is also provided with a ``set()`` method, which here accepts a
mapping of keys and values to set in the cache::
generate_something.set({"k1": "value1",
"k2": "value2", "k3": "value3"})
...an ``invalidate()`` method, which has the effect of deleting
the given sequence of keys using the same mechanism as that of
:meth:`.CacheRegion.delete_multi`::
generate_something.invalidate("k1", "k2", "k3")
...a ``refresh()`` method, which will call the creation
function, cache the new values, and return them::
values = generate_something.refresh("k1", "k2", "k3")
...and a ``get()`` method, which will return values
based on the given arguments::
values = generate_something.get("k1", "k2", "k3")
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
have the same meaning as those passed to
:meth:`.CacheRegion.cache_on_arguments`.
:param namespace: optional string argument which will be
established as part of each cache key.
:param expiration_time: if not None, will override the normal
expiration time. May be passed as an integer or a
callable.
:param should_cache_fn: passed to
:meth:`.CacheRegion.get_or_create_multi`. This function is given a
value as returned by the creator, and only if it returns True will
that value be placed in the cache.
:param asdict: if ``True``, the decorated function should return
its result as a dictionary of keys->values, and the final result
of calling the decorated function will also be a dictionary.
If left at its default value of ``False``, the decorated function
should return its result as a list of values, and the final
result of calling the decorated function will also be a list.
When ``asdict=True``, if the dictionary returned by the decorated
function is missing keys, those keys will not be cached; see the
example at the end of this docstring.
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_multi_key_generator: a function that will produce a
list of keys. This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_on_arguments`
:meth:`.CacheRegion.get_or_create_multi`
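A sketch of the ``asdict=True`` form described above, where
``lookup_one`` is a hypothetical per-key lookup::
    @someregion.cache_multi_on_arguments(asdict=True)
    def generate_something(*keys):
        return dict(
            (key, lookup_one(key))
            for key in keys
        )
    result = generate_something("k1", "k2")  # returns a dict of key -> value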
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_multi_key_generator is None:
function_multi_key_generator = self.function_multi_key_generator
def decorator(fn):
key_generator = function_multi_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
key_lookup = dict(zip(keys, cache_keys))
@wraps(fn)
def creator(*keys_to_create):
return fn(*[key_lookup[k] for k in keys_to_create])
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE)
for k in keys]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn)
result = dict(
(k, v) for k, v in zip(cache_keys, result)
if v is not NO_VALUE)
else:
result = self.get_or_create_multi(
keys, creator, timeout,
should_cache_fn)
return result
def invalidate(*arg):
keys = key_generator(*arg)
self.delete_multi(keys)
def set_(mapping):
keys = list(mapping)
gen_keys = key_generator(*keys)
self.set_multi(dict(
(gen_key, mapping[key])
for gen_key, key
in zip(gen_keys, keys))
)
def get(*arg):
keys = key_generator(*arg)
return self.get_multi(keys)
def refresh(*arg):
keys = key_generator(*arg)
values = fn(*arg)
if asdict:
self.set_multi(
dict(zip(keys, [values[a] for a in arg]))
)
return values
else:
self.set_multi(
dict(zip(keys, values))
)
return values
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
return decorate
return decorator
def make_region(*arg, **kw):
"""Instantiate a new :class:`.CacheRegion`.
Currently, :func:`.make_region` is a passthrough
to :class:`.CacheRegion`. See that class for
constructor arguments.
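A minimal sketch (the backend choice here is illustrative)::
    from dogpile.cache import make_region
    region = make_region().configure('dogpile.cache.memory')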
"""
return CacheRegion(*arg, **kw)
| from __future__ import with_statement
from .. import Lock, NeedRegenerationException
from ..util import NameRegistry
from . import exception
from ..util import PluginLoader, memoized_property, coerce_string_conf
from .util import function_key_generator, function_multi_key_generator
from .api import NO_VALUE, CachedValue
from .proxy import ProxyBackend
from ..util import compat
import time
import datetime
from numbers import Number
from functools import wraps
import threading
_backend_loader = PluginLoader("dogpile.cache")
register_backend = _backend_loader.register
from . import backends # noqa
value_version = 1
"""An integer placed in the :class:`.CachedValue`
so that new versions of dogpile.cache can detect cached
values from a previous, backwards-incompatible version.
"""
class RegionInvalidationStrategy(object):
"""Region invalidation strategy interface
Implement this interface and pass an implementation instance
to :meth:`.CacheRegion.configure` to override the default region invalidation.
Example::
class CustomInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._soft_invalidated = None
self._hard_invalidated = None
def invalidate(self, hard=None):
if hard:
self._soft_invalidated = None
self._hard_invalidated = time.time()
else:
self._soft_invalidated = time.time()
self._hard_invalidated = None
def is_invalidated(self, timestamp):
return ((self._soft_invalidated and
timestamp < self._soft_invalidated) or
(self._hard_invalidated and
timestamp < self._hard_invalidated))
def was_hard_invalidated(self):
return bool(self._hard_invalidated)
def is_hard_invalidated(self, timestamp):
return (self._hard_invalidated and
timestamp < self._hard_invalidated)
def was_soft_invalidated(self):
return bool(self._soft_invalidated)
def is_soft_invalidated(self, timestamp):
return (self._soft_invalidated and
timestamp < self._soft_invalidated)
The custom implementation is injected into a :class:`.CacheRegion`
at configure time using the
:paramref:`.CacheRegion.configure.region_invalidator` parameter::
region = CacheRegion()
region = region.configure(region_invalidator=CustomInvalidationStrategy())
Invalidation strategies that wish to have access to the
:class:`.CacheRegion` itself should construct the invalidator given the
region as an argument::
class MyInvalidator(RegionInvalidationStrategy):
def __init__(self, region):
self.region = region
# ...
# ...
region = CacheRegion()
region = region.configure(region_invalidator=MyInvalidator(region))
.. versionadded:: 0.6.2
.. seealso::
:paramref:`.CacheRegion.configure.region_invalidator`
"""
def invalidate(self, hard=True):
"""Region invalidation.
This call is propagated from :class:`.CacheRegion`.
The default invalidation system works by setting
a current timestamp (using ``time.time()``) to consider all older
timestamps effectively invalidated.
"""
raise NotImplementedError()
def is_hard_invalidated(self, timestamp):
"""Check timestamp to determine if it was hard invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in hard mode.
"""
raise NotImplementedError()
def is_soft_invalidated(self, timestamp):
"""Check timestamp to determine if it was soft invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time and region is invalidated
in soft mode.
"""
raise NotImplementedError()
def is_invalidated(self, timestamp):
"""Check timestamp to determine if it was invalidated.
:return: Boolean. True if ``timestamp`` is older than
the last region invalidation time.
"""
raise NotImplementedError()
def was_soft_invalidated(self):
"""Indicate the region was invalidated in soft mode.
:return: Boolean. True if region was invalidated in soft mode.
"""
raise NotImplementedError()
def was_hard_invalidated(self):
"""Indicate the region was invalidated in hard mode.
:return: Boolean. True if region was invalidated in hard mode.
"""
raise NotImplementedError()
class DefaultInvalidationStrategy(RegionInvalidationStrategy):
def __init__(self):
self._is_hard_invalidated = None
self._invalidated = None
def invalidate(self, hard=True):
self._is_hard_invalidated = bool(hard)
self._invalidated = time.time()
def is_invalidated(self, timestamp):
return (self._invalidated is not None and
timestamp < self._invalidated)
def was_hard_invalidated(self):
return self._is_hard_invalidated is True
def is_hard_invalidated(self, timestamp):
return self.was_hard_invalidated() and self.is_invalidated(timestamp)
def was_soft_invalidated(self):
return self._is_hard_invalidated is False
def is_soft_invalidated(self, timestamp):
return self.was_soft_invalidated() and self.is_invalidated(timestamp)
class CacheRegion(object):
"""A front end to a particular cache backend.
:param name: Optional, a string name for the region.
This isn't used internally
but can be accessed via the ``.name`` parameter, helpful
for configuring a region from a config file.
:param function_key_generator: Optional. A
function that will produce a "cache key" given
a data creation function and arguments, when using
the :meth:`.CacheRegion.cache_on_arguments` method.
The structure of this function
should be two levels: given the data creation function,
return a new function that generates the key based on
the given arguments. Such as::
def my_key_generator(namespace, fn, **kw):
fname = fn.__name__
def generate_key(*arg):
return namespace + "_" + fname + "_".join(str(s) for s in arg)
return generate_key
region = make_region(
function_key_generator = my_key_generator
).configure(
"dogpile.cache.dbm",
expiration_time=300,
arguments={
"filename":"file.dbm"
}
)
The ``namespace`` is that passed to
:meth:`.CacheRegion.cache_on_arguments`. It's not consulted
outside this function, so in fact can be of any form.
For example, it can be passed as a tuple, used to specify
arguments to pluck from \**kw::
def my_key_generator(namespace, fn):
def generate_key(*arg, **kw):
return ":".join(
[kw[k] for k in namespace] +
[str(x) for x in arg]
)
return generate_key
Where the decorator might be used as::
@my_region.cache_on_arguments(namespace=('x', 'y'))
def my_function(a, b, **kw):
return my_data()
.. seealso::
:func:`.function_key_generator` - default key generator
:func:`.kwarg_function_key_generator` - optional generator that also
uses keyword arguments
:param function_multi_key_generator: Optional.
Similar to the ``function_key_generator`` parameter, but used in
:meth:`.CacheRegion.cache_multi_on_arguments`. The generated function
should return a list of keys. For example::
def my_multi_key_generator(namespace, fn, **kw):
namespace = fn.__name__ + (namespace or '')
def generate_keys(*args):
return [namespace + ':' + str(a) for a in args]
return generate_keys
:param key_mangler: Function which will be used on all incoming
keys before passing to the backend. Defaults to ``None``,
in which case the key mangling function recommended by
the cache backend will be used. A typical mangler
is the SHA1 mangler found at :func:`.sha1_mangle_key`
which coerces keys into a SHA1
hash, so that the string length is fixed. To
disable all key mangling, set to ``False``. Another typical
mangler is the built-in Python function ``str``, which can be used
to convert non-string or Unicode keys to bytestrings, which is
needed when using a backend such as bsddb or dbm under Python 2.x
in conjunction with Unicode keys.
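For example, to apply the SHA1 mangler explicitly (a sketch;
:func:`.sha1_mangle_key` is provided by ``dogpile.cache.util``)::
    from dogpile.cache.util import sha1_mangle_key
    region = make_region(key_mangler=sha1_mangle_key)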
:param async_creation_runner: A callable that, when specified,
will be passed to and called by dogpile.lock when
there is a stale value present in the cache. It will be passed the
mutex and is responsible for releasing that mutex when finished.
This can be used to defer the computation of expensive creator
functions to later points in the future by way of, for example, a
background thread, a long-running queue, or a task manager system
like Celery.
For a specific example using async_creation_runner, new values can
be created in a background thread like so::
import threading
def async_creation_runner(cache, somekey, creator, mutex):
''' Used by dogpile.core:Lock when appropriate '''
def runner():
try:
value = creator()
cache.set(somekey, value)
finally:
mutex.release()
thread = threading.Thread(target=runner)
thread.start()
region = make_region(
async_creation_runner=async_creation_runner,
).configure(
'dogpile.cache.memcached',
expiration_time=5,
arguments={
'url': '127.0.0.1:11211',
'distributed_lock': True,
}
)
Remember that the first request for a key with no associated
value will always block; async_creator will not be invoked.
However, subsequent requests for cached-but-expired values will
still return promptly. They will be refreshed by whatever
asynchronous means the provided async_creation_runner callable
implements.
By default the async_creation_runner is disabled and is set
to ``None``.
.. versionadded:: 0.4.2 added the async_creation_runner
feature.
"""
def __init__(
self,
name=None,
function_key_generator=function_key_generator,
function_multi_key_generator=function_multi_key_generator,
key_mangler=None,
async_creation_runner=None,
):
"""Construct a new :class:`.CacheRegion`."""
self.name = name
self.function_key_generator = function_key_generator
self.function_multi_key_generator = function_multi_key_generator
self.key_mangler = self._user_defined_key_mangler = key_mangler
self.async_creation_runner = async_creation_runner
self.region_invalidator = DefaultInvalidationStrategy()
def configure(
self, backend,
expiration_time=None,
arguments=None,
_config_argument_dict=None,
_config_prefix=None,
wrap=None,
replace_existing_backend=False,
region_invalidator=None
):
"""Configure a :class:`.CacheRegion`.
The :class:`.CacheRegion` itself
is returned.
:param backend: Required. This is the name of the
:class:`.CacheBackend` to use, and is resolved by loading
the class from the ``dogpile.cache`` entrypoint.
:param expiration_time: Optional. The expiration time passed
to the dogpile system. May be passed as an integer number
of seconds, or as a ``datetime.timedelta`` value.
.. versionadded:: 0.5.0
``expiration_time`` may be optionally passed as a
``datetime.timedelta`` value.
The :meth:`.CacheRegion.get_or_create`
method as well as the :meth:`.CacheRegion.cache_on_arguments`
decorator (though note: **not** the :meth:`.CacheRegion.get`
method) will call upon the value creation function after this
time period has passed since the last generation.
:param arguments: Optional. The structure here is passed
directly to the constructor of the :class:`.CacheBackend`
in use, though is typically a dictionary.
:param wrap: Optional. A list of :class:`.ProxyBackend`
classes and/or instances, each of which will be applied
in a chain to ultimately wrap the original backend,
so that custom functionality augmentation can be applied.
.. versionadded:: 0.5.0
.. seealso::
:ref:`changing_backend_behavior`
:param replace_existing_backend: if True, the existing cache backend
will be replaced. Without this flag, an exception is raised if
a backend is already configured.
.. versionadded:: 0.5.7
:param region_invalidator: Optional. Override default invalidation
strategy with custom implementation of
:class:`.RegionInvalidationStrategy`.
.. versionadded:: 0.6.2
"""
if "backend" in self.__dict__ and not replace_existing_backend:
raise exception.RegionAlreadyConfigured(
"This region is already "
"configured with backend: %s. "
"Specify replace_existing_backend=True to replace."
% self.backend)
backend_cls = _backend_loader.load(backend)
if _config_argument_dict:
self.backend = backend_cls.from_config_dict(
_config_argument_dict,
_config_prefix
)
else:
self.backend = backend_cls(arguments or {})
if not expiration_time or isinstance(expiration_time, Number):
self.expiration_time = expiration_time
elif isinstance(expiration_time, datetime.timedelta):
self.expiration_time = int(
compat.timedelta_total_seconds(expiration_time))
else:
raise exception.ValidationError(
'expiration_time is not a number or timedelta.')
if not self._user_defined_key_mangler:
self.key_mangler = self.backend.key_mangler
self._lock_registry = NameRegistry(self._create_mutex)
if getattr(wrap, '__iter__', False):
for wrapper in reversed(wrap):
self.wrap(wrapper)
if region_invalidator:
self.region_invalidator = region_invalidator
return self
def wrap(self, proxy):
''' Takes a ProxyBackend instance or class and wraps the
attached backend. '''
# if we were passed a type rather than an instance then
# initialize it.
if type(proxy) == type:
proxy = proxy()
if not issubclass(type(proxy), ProxyBackend):
raise TypeError("Type %s is not a valid ProxyBackend"
% type(proxy))
self.backend = proxy.wrap(self.backend)
def _mutex(self, key):
return self._lock_registry.get(key)
class _LockWrapper(object):
"""weakref-capable wrapper for threading.Lock"""
def __init__(self):
self.lock = threading.Lock()
def acquire(self, wait=True):
return self.lock.acquire(wait)
def release(self):
self.lock.release()
def _create_mutex(self, key):
mutex = self.backend.get_mutex(key)
if mutex is not None:
return mutex
else:
return self._LockWrapper()
def invalidate(self, hard=True):
"""Invalidate this :class:`.CacheRegion`.
The default invalidation system works by setting
a current timestamp (using ``time.time()``)
representing the "minimum creation time" for
a value. Any retrieved value whose creation
time is prior to this timestamp
is considered to be stale. It does not
affect the data in the cache in any way, and is also
local to this instance of :class:`.CacheRegion`.
Once set, the invalidation time is honored by
the :meth:`.CacheRegion.get_or_create`,
:meth:`.CacheRegion.get_or_create_multi` and
:meth:`.CacheRegion.get` methods.
The method supports both "hard" and "soft" invalidation
options. With "hard" invalidation,
:meth:`.CacheRegion.get_or_create` will force an immediate
regeneration of the value which all getters will wait for.
With "soft" invalidation, subsequent getters will return the
"old" value until the new one is available.
Usage of "soft" invalidation requires that the region or the method
is given a non-None expiration time.
.. versionadded:: 0.3.0
:param hard: if True, cache values will all require immediate
regeneration; dogpile logic won't be used. If False, the
creation time of existing values will be pushed back before
the expiration time so that a return+regen will be invoked.
.. versionadded:: 0.5.1
"""
self.region_invalidator.invalidate(hard)
def configure_from_config(self, config_dict, prefix):
"""Configure from a configuration dictionary
and a prefix.
Example::
local_region = make_region()
memcached_region = make_region()
# regions are ready to use for function
# decorators, but not yet for actual caching
# later, when config is available
myconfig = {
"cache.local.backend":"dogpile.cache.dbm",
"cache.local.arguments.filename":"/path/to/dbmfile.dbm",
"cache.memcached.backend":"dogpile.cache.pylibmc",
"cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1",
}
local_region.configure_from_config(myconfig, "cache.local.")
memcached_region.configure_from_config(myconfig,
"cache.memcached.")
"""
config_dict = coerce_string_conf(config_dict)
return self.configure(
config_dict["%sbackend" % prefix],
expiration_time=config_dict.get(
"%sexpiration_time" % prefix, None),
_config_argument_dict=config_dict,
_config_prefix="%sarguments." % prefix,
wrap=config_dict.get(
"%swrap" % prefix, None),
)
@memoized_property
def backend(self):
raise exception.RegionNotConfigured(
"No backend is configured on this region.")
@property
def is_configured(self):
"""Return True if the backend has been configured via the
:meth:`.CacheRegion.configure` method already.
.. versionadded:: 0.5.1
"""
return 'backend' in self.__dict__
def get(self, key, expiration_time=None, ignore_expiration=False):
"""Return a value from the cache, based on the given key.
If the value is not present, the method returns the token
``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionchanged:: 0.3.0
:meth:`.CacheRegion.get` now checks the value's creation time
against the expiration time, rather than returning
the value unconditionally.
The method also interprets the cached value in terms
of the current "invalidation" time as set by
the :meth:`.invalidate` method. If a value is present,
but its creation time is older than the current
invalidation time, the ``NO_VALUE`` token is returned.
Passing the flag ``ignore_expiration=True`` bypasses
the invalidation time check.
.. versionadded:: 0.3.0
Support for the :meth:`.CacheRegion.invalidate`
method.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
after being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param expiration_time: Optional expiration time value
which will supersede that configured on the :class:`.CacheRegion`
itself.
.. versionadded:: 0.3.0
:param ignore_expiration: if ``True``, the value is returned
from the cache if present, regardless of configured
expiration times or whether or not :meth:`.invalidate`
was called.
.. versionadded:: 0.3.0
"""
if self.key_mangler:
key = self.key_mangler(key)
value = self.backend.get(key)
value = self._unexpired_value_fn(
expiration_time, ignore_expiration)(value)
return value.payload
def _unexpired_value_fn(self, expiration_time, ignore_expiration):
if ignore_expiration:
return lambda value: value
else:
if expiration_time is None:
expiration_time = self.expiration_time
current_time = time.time()
def value_fn(value):
if value is NO_VALUE:
return value
elif expiration_time is not None and \
current_time - value.metadata["ct"] > expiration_time:
return NO_VALUE
elif self.region_invalidator.is_invalidated(
value.metadata["ct"]):
return NO_VALUE
else:
return value
return value_fn
def get_multi(self, keys, expiration_time=None, ignore_expiration=False):
"""Return multiple values from the cache, based on the given keys.
Returns values as a list matching the keys given.
E.g.::
values = region.get_multi(["one", "two", "three"])
To convert values to a dictionary, use ``zip()``::
keys = ["one", "two", "three"]
values = region.get_multi(keys)
dictionary = dict(zip(keys, values))
Keys which aren't present in the list are returned as
the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False,
but is separate from
``None`` to distinguish between a cached value of ``None``.
By default, the configured expiration time of the
:class:`.CacheRegion`, or alternatively the expiration
time supplied by the ``expiration_time`` argument,
is tested against the creation time of the retrieved
value versus the current time (as reported by ``time.time()``).
If stale, the cached value is ignored and the ``NO_VALUE``
token is returned. Passing the flag ``ignore_expiration=True``
bypasses the expiration time check.
.. versionadded:: 0.5.0
"""
if not keys:
return []
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
backend_values = self.backend.get_multi(keys)
_unexpired_value_fn = self._unexpired_value_fn(
expiration_time, ignore_expiration)
return [
value.payload if value is not NO_VALUE else value
for value in
(
_unexpired_value_fn(value) for value in
backend_values
)
]
def get_or_create(
self, key, creator, expiration_time=None, should_cache_fn=None):
"""Return a cached value based on the given key.
If the value does not exist or is considered to be expired
based on its creation time, the given
creation function may or may not be used to recreate the value
and persist the newly generated value in the cache.
Whether or not the function is used depends on whether the
*dogpile lock* can be acquired. If it can't, it means
a different thread or process is already running a creation
function for this key against the cache. When the dogpile
lock cannot be acquired, the method will block if no
previous value is available, until the lock is released and
a new value is available. If a previous value
is available, that value is returned immediately without blocking.
If the :meth:`.invalidate` method has been called, and
the retrieved value's timestamp is older than the invalidation
timestamp, the value is unconditionally prevented from
being returned. The method will attempt to acquire the dogpile
lock to generate a new value, or will wait
until the lock is released to return the new value.
.. versionchanged:: 0.3.0
The value is unconditionally regenerated if the creation
time is older than the last call to :meth:`.invalidate`.
:param key: Key to be retrieved. While it's typical for a key to be a
string, it is ultimately passed directly down to the cache backend,
after being optionally processed by the key_mangler function, so can
be of any type recognized by the backend or by the key_mangler
function, if present.
:param creator: function which creates a new value.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
the value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
E.g.::
def dont_cache_none(value):
return value is not None
value = region.get_or_create("some key",
create_value,
should_cache_fn=dont_cache_none)
Above, the function returns the value of create_value() if
the cache is invalid, however if the return value is None,
it won't be cached.
.. versionadded:: 0.4.3
.. seealso::
:meth:`.CacheRegion.cache_on_arguments` - applies
:meth:`.get_or_create` to any function using a decorator.
:meth:`.CacheRegion.get_or_create_multi` - multiple key/value
version
"""
orig_key = key
if self.key_mangler:
key = self.key_mangler(key)
def get_value():
value = self.backend.get(key)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
raise NeedRegenerationException()
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
created_value = creator()
value = self._value(created_value)
if not should_cache_fn or \
should_cache_fn(created_value):
self.backend.set(key, value)
return value.payload, value.metadata["ct"]
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
if self.async_creation_runner:
def async_creator(mutex):
return self.async_creation_runner(
self, orig_key, creator, mutex)
else:
async_creator = None
with Lock(
self._mutex(key),
gen_value,
get_value,
expiration_time,
async_creator) as value:
return value
def get_or_create_multi(
self, keys, creator, expiration_time=None, should_cache_fn=None):
"""Return a sequence of cached values based on a sequence of keys.
The behavior for generation of values based on keys corresponds
to that of :meth:`.Region.get_or_create`, with the exception that
the ``creator()`` function may be asked to generate any subset of
the given keys. The list of keys to be generated is passed to
``creator()``, and ``creator()`` should return the generated values
as a sequence corresponding to the order of the keys.
The method uses the same approach as :meth:`.Region.get_multi`
and :meth:`.Region.set_multi` to get and set values from the
backend.
If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend`
that modifies values, take note this function invokes
``.set_multi()`` for newly generated values using the same values it
returns to the calling function. A correct implementation of
``.set_multi()`` will not modify values in-place on the submitted
``mapping`` dict.
:param keys: Sequence of keys to be retrieved.
:param creator: function which accepts a sequence of keys and
returns a sequence of new values.
:param expiration_time: optional expiration time which will override
the expiration time already configured on this :class:`.CacheRegion`
if not None. To set no expiration, use the value -1.
:param should_cache_fn: optional callable function which will receive
each value returned by the "creator", and will then return True or
False, indicating if the value should actually be cached or not. If
it returns False, the value is still returned, but isn't cached.
.. versionadded:: 0.5.0
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
def get_value(key):
value = values.get(key, NO_VALUE)
if (value is NO_VALUE or value.metadata['v'] != value_version or
self.region_invalidator.is_hard_invalidated(
value.metadata["ct"])):
# dogpile.core understands a 0 here as
# "the value is not available", i.e.
# _has_value() will return False.
return value.payload, 0
else:
ct = value.metadata["ct"]
if self.region_invalidator.is_soft_invalidated(ct):
ct = time.time() - expiration_time - .0001
return value.payload, ct
def gen_value():
raise NotImplementedError()
def async_creator(key, mutex):
mutexes[key] = mutex
if expiration_time is None:
expiration_time = self.expiration_time
if (expiration_time is None and
self.region_invalidator.was_soft_invalidated()):
raise exception.DogpileCacheException(
"Non-None expiration time required "
"for soft invalidation")
if expiration_time == -1:
expiration_time = None
mutexes = {}
sorted_unique_keys = sorted(set(keys))
if self.key_mangler:
mangled_keys = [self.key_mangler(k) for k in sorted_unique_keys]
else:
mangled_keys = sorted_unique_keys
orig_to_mangled = dict(zip(sorted_unique_keys, mangled_keys))
values = dict(zip(mangled_keys, self.backend.get_multi(mangled_keys)))
for orig_key, mangled_key in orig_to_mangled.items():
with Lock(
self._mutex(mangled_key),
gen_value,
lambda: get_value(mangled_key),
expiration_time,
async_creator=lambda mutex: async_creator(orig_key, mutex)
):
pass
try:
if mutexes:
# sort the keys so they are processed in a deterministic order;
# the idea is to prevent deadlocks, though one hasn't been
# reproduced in practice.
keys_to_get = sorted(mutexes)
new_values = creator(*keys_to_get)
values_w_created = dict(
(orig_to_mangled[k], self._value(v))
for k, v in zip(keys_to_get, new_values)
)
if not should_cache_fn:
self.backend.set_multi(values_w_created)
else:
self.backend.set_multi(dict(
(k, v)
for k, v in values_w_created.items()
if should_cache_fn(v[0])
))
values.update(values_w_created)
return [values[orig_to_mangled[k]].payload for k in keys]
finally:
for mutex in mutexes.values():
mutex.release()
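# A short sketch of get_or_create_multi(), assuming the same hypothetical
# region as in the earlier sketch; the creator receives only the keys that
# need generation, in sorted order, and must return values in matching order.
#
#   def load_many(*keys):
#       return [load_one(k) for k in keys]   # load_one() is hypothetical
#
#   values = region.get_or_create_multi(
#       ["k1", "k2", "k3"], load_many, expiration_time=300)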
def _value(self, value):
"""Return a :class:`.CachedValue` given a value."""
return CachedValue(
value,
{
"ct": time.time(),
"v": value_version
})
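# Illustration of what _value() produces, based on how get_value() above
# consumes it: a CachedValue holding the payload plus creation metadata.
#
#   cv = CachedValue("some payload", {"ct": time.time(), "v": value_version})
#   cv.payload          # -> "some payload"
#   cv.metadata["ct"]   # creation timestamp checked against expiration
#   cv.metadata["v"]    # format version compared with value_version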
def set(self, key, value):
"""Place a new value in the cache under the given key."""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.set(key, self._value(value))
def set_multi(self, mapping):
"""Place new values in the cache under the given keys.
.. versionadded:: 0.5.0
"""
if not mapping:
return
if self.key_mangler:
mapping = dict((
self.key_mangler(k), self._value(v))
for k, v in mapping.items())
else:
mapping = dict((k, self._value(v)) for k, v in mapping.items())
self.backend.set_multi(mapping)
def delete(self, key):
"""Remove a value from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
"""
if self.key_mangler:
key = self.key_mangler(key)
self.backend.delete(key)
def delete_multi(self, keys):
"""Remove multiple values from the cache.
This operation is idempotent (can be called multiple times, or on a
non-existent key, safely)
.. versionadded:: 0.5.0
"""
if self.key_mangler:
keys = list(map(lambda key: self.key_mangler(key), keys))
self.backend.delete_multi(keys)
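# A brief round-trip sketch for the plain setters and deleters above,
# assuming the same hypothetical region; set()/set_multi() wrap values via
# _value() and apply the key_mangler before reaching the backend.
#
#   region.set("greeting", "hello")
#   region.set_multi({"a": 1, "b": 2})
#   region.get("greeting")           # -> "hello" (NO_VALUE once expired)
#   region.get_multi(["a", "b"])     # -> [1, 2]
#   region.delete("greeting")
#   region.delete_multi(["a", "b"])  # safe even if the keys are already gone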
def cache_on_arguments(
self, namespace=None,
expiration_time=None,
should_cache_fn=None,
to_str=compat.string_type,
function_key_generator=None):
"""A function decorator that will cache the return
value of the function using a key derived from the
function itself and its arguments.
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
E.g.::
@someregion.cache_on_arguments()
def generate_something(x, y):
return somedatabase.query(x, y)
The decorated function can then be called normally, where
data will be pulled from the cache region unless a new
value is needed::
result = generate_something(5, 6)
The function is also given an attribute ``invalidate()``, which
provides for invalidation of the value. Pass to ``invalidate()``
the same arguments you'd pass to the function itself to represent
a particular value::
generate_something.invalidate(5, 6)
Another attribute ``set()`` is added to provide extra caching
possibilities relative to the function. This is a convenience
method for :meth:`.CacheRegion.set` which will store a given
value directly without calling the decorated function.
The value to be cached is passed as the first argument, and the
arguments which would normally be passed to the function
should follow::
generate_something.set(3, 5, 6)
The above example is equivalent to calling
``generate_something(5, 6)``, if the function were to produce
the value ``3`` as the value to be cached.
.. versionadded:: 0.4.1 Added ``set()`` method to decorated function.
Similar to ``set()`` is ``refresh()``. This attribute will
invoke the decorated function and populate the cache with the
new value, as well as returning that value::
newvalue = generate_something.refresh(5, 6)
.. versionadded:: 0.5.0 Added ``refresh()`` method to decorated
function.
Lastly, the ``get()`` method returns either the value cached
for the given key, or the token ``NO_VALUE`` if no such key
exists::
value = generate_something.get(5, 6)
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
The default key generation will use the name
of the function, the module name for the function,
the arguments passed, as well as an optional "namespace"
parameter in order to generate a cache key.
Given a function ``one`` inside the module
``myapp.tools``::
@region.cache_on_arguments(namespace="foo")
def one(a, b):
return a + b
Above, calling ``one(3, 4)`` will produce a
cache key as follows::
myapp.tools:one|foo|3 4
The key generator will ignore an initial argument
of ``self`` or ``cls``, making the decorator suitable
(with caveats) for use with instance or class methods.
Given the example::
class MyClass(object):
@region.cache_on_arguments(namespace="foo")
def one(self, a, b):
return a + b
The cache key above for ``MyClass().one(3, 4)`` will
again produce the same cache key of ``myapp.tools:one|foo|3 4`` -
the name ``self`` is skipped.
The ``namespace`` parameter is optional, and is used
normally to disambiguate two functions of the same
name within the same module, as can occur when decorating
instance or class methods as below::
class MyClass(object):
@region.cache_on_arguments(namespace='MC')
def somemethod(self, x, y):
""
class MyOtherClass(object):
@region.cache_on_arguments(namespace='MOC')
def somemethod(self, x, y):
""
Above, the ``namespace`` parameter disambiguates
between ``somemethod`` on ``MyClass`` and ``MyOtherClass``.
Python class declaration mechanics otherwise prevent
the decorator from having awareness of the ``MyClass``
and ``MyOtherClass`` names, as the function is received
by the decorator before it becomes an instance method.
The function key generation can be entirely replaced
on a per-region basis using the ``function_key_generator``
argument present on :func:`.make_region` and
:class:`.CacheRegion`. It defaults to
:func:`.function_key_generator`.
:param namespace: optional string argument which will be
established as part of the cache key. This may be needed
to disambiguate functions of the same name within the same
source file, such as those
associated with classes - note that the decorator itself
can't see the parent class on a function as the class is
being declared.
:param expiration_time: if not None, will override the normal
expiration time.
May be specified as a callable, taking no arguments, that
returns a value to be used as the ``expiration_time``. This callable
will be called whenever the decorated function itself is called, in
caching or retrieving. Thus, this can be used to
determine a *dynamic* expiration time for the cached function
result. Example use cases include "cache the result until the
end of the day, week or time period" and "cache until a certain date
or time passes".
.. versionchanged:: 0.5.0
``expiration_time`` may be passed as a callable to
:meth:`.CacheRegion.cache_on_arguments`.
:param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`.
.. versionadded:: 0.4.3
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_key_generator: a function that will produce a
"cache key". This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_multi_on_arguments`
:meth:`.CacheRegion.get_or_create`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_key_generator is None:
function_key_generator = self.function_key_generator
def decorator(fn):
if to_str is compat.string_type:
# backwards compatible
key_generator = function_key_generator(namespace, fn)
else:
key_generator = function_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
key = key_generator(*arg, **kw)
@wraps(fn)
def creator():
return fn(*arg, **kw)
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
return self.get_or_create(key, creator, timeout,
should_cache_fn)
def invalidate(*arg, **kw):
key = key_generator(*arg, **kw)
self.delete(key)
def set_(value, *arg, **kw):
key = key_generator(*arg, **kw)
self.set(key, value)
def get(*arg, **kw):
key = key_generator(*arg, **kw)
return self.get(key)
def refresh(*arg, **kw):
key = key_generator(*arg, **kw)
value = fn(*arg, **kw)
self.set(key, value)
return value
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
decorate.original = fn
return decorate
return decorator
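# Usage sketch for the decorator defined above, assuming a configured region
# named region; generate_something is hypothetical. The attributes attached
# by decorate() mirror the docstring: set(), invalidate(), refresh(), get()
# and original.
#
#   @region.cache_on_arguments(namespace="demo", expiration_time=60)
#   def generate_something(x, y):
#       return x + y
#
#   generate_something(3, 4)           # computed once, then served from cache
#   generate_something.invalidate(3, 4)
#   generate_something.set(99, 3, 4)   # prime the cache without calling fn
#   generate_something.refresh(3, 4)   # recompute and store
#   generate_something.get(3, 4)       # cached value or NO_VALUE
#   generate_something.original(3, 4)  # call the undecorated function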
def cache_multi_on_arguments(
self, namespace=None, expiration_time=None,
should_cache_fn=None,
asdict=False, to_str=compat.string_type,
function_multi_key_generator=None):
"""A function decorator that will cache multiple return
values from the function using a sequence of keys derived from the
function itself and the arguments passed to it.
This method is the "multiple key" analogue to the
:meth:`.CacheRegion.cache_on_arguments` method.
Example::
@someregion.cache_multi_on_arguments()
def generate_something(*keys):
return [
somedatabase.query(key)
for key in keys
]
The decorated function can be called normally. The decorator
will produce a list of cache keys using a mechanism similar to
that of :meth:`.CacheRegion.cache_on_arguments`, combining the
name of the function with the optional namespace and with the
string form of each key. It will then consult the cache using
the same mechanism as that of :meth:`.CacheRegion.get_multi`
to retrieve all current values; the originally passed keys
corresponding to those values which aren't present in the cache or need
regeneration will be assembled into a new argument list, and
the decorated function is then called with that subset of
arguments.
The returned result is a list::
result = generate_something("key1", "key2", "key3")
The decorator internally makes use of the
:meth:`.CacheRegion.get_or_create_multi` method to access the
cache and conditionally call the function. See that
method for additional behavioral details.
Unlike the :meth:`.CacheRegion.cache_on_arguments` method,
:meth:`.CacheRegion.cache_multi_on_arguments` works only with
a single function signature, one which takes a simple list of
keys as arguments.
Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function
is also provided with a ``set()`` method, which here accepts a
mapping of keys and values to set in the cache::
generate_something.set({"k1": "value1",
"k2": "value2", "k3": "value3"})
...an ``invalidate()`` method, which has the effect of deleting
the given sequence of keys using the same mechanism as that of
:meth:`.CacheRegion.delete_multi`::
generate_something.invalidate("k1", "k2", "k3")
...a ``refresh()`` method, which will call the creation
function, cache the new values, and return them::
values = generate_something.refresh("k1", "k2", "k3")
...and a ``get()`` method, which will return values
based on the given arguments::
values = generate_something.get("k1", "k2", "k3")
.. versionadded:: 0.5.3 Added ``get()`` method to decorated
function.
Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments`
have the same meaning as those passed to
:meth:`.CacheRegion.cache_on_arguments`.
:param namespace: optional string argument which will be
established as part of each cache key.
:param expiration_time: if not None, will override the normal
expiration time. May be passed as an integer or a
callable.
:param should_cache_fn: passed to
:meth:`.CacheRegion.get_or_create_multi`. This function is given a
value as returned by the creator, and only if it returns True will
that value be placed in the cache.
:param asdict: if ``True``, the decorated function should return
its result as a dictionary of keys->values, and the final result
of calling the decorated function will also be a dictionary.
If left at its default value of ``False``, the decorated function
should return its result as a list of values, and the final
result of calling the decorated function will also be a list.
When ``asdict==True``, if the dictionary returned by the decorated
function is missing keys, those keys will not be cached.
:param to_str: callable, will be called on each function argument
in order to convert to a string. Defaults to ``str()``. If the
function accepts non-ascii unicode arguments on Python 2.x, the
``unicode()`` builtin can be substituted, but note this will
produce unicode cache keys which may require key mangling before
reaching the cache.
.. versionadded:: 0.5.0
:param function_multi_key_generator: a function that will produce a
list of keys. This function will supersede the one configured on the
:class:`.CacheRegion` itself.
.. versionadded:: 0.5.5
.. seealso::
:meth:`.CacheRegion.cache_on_arguments`
:meth:`.CacheRegion.get_or_create_multi`
"""
expiration_time_is_callable = compat.callable(expiration_time)
if function_multi_key_generator is None:
function_multi_key_generator = self.function_multi_key_generator
def decorator(fn):
key_generator = function_multi_key_generator(
namespace, fn,
to_str=to_str)
@wraps(fn)
def decorate(*arg, **kw):
cache_keys = arg
keys = key_generator(*arg, **kw)
key_lookup = dict(zip(keys, cache_keys))
@wraps(fn)
def creator(*keys_to_create):
return fn(*[key_lookup[k] for k in keys_to_create])
timeout = expiration_time() if expiration_time_is_callable \
else expiration_time
if asdict:
def dict_create(*keys):
d_values = creator(*keys)
return [
d_values.get(key_lookup[k], NO_VALUE)
for k in keys]
def wrap_cache_fn(value):
if value is NO_VALUE:
return False
elif not should_cache_fn:
return True
else:
return should_cache_fn(value)
result = self.get_or_create_multi(
keys, dict_create, timeout, wrap_cache_fn)
result = dict(
(k, v) for k, v in zip(cache_keys, result)
if v is not NO_VALUE)
else:
result = self.get_or_create_multi(
keys, creator, timeout,
should_cache_fn)
return result
def invalidate(*arg):
keys = key_generator(*arg)
self.delete_multi(keys)
def set_(mapping):
keys = list(mapping)
gen_keys = key_generator(*keys)
self.set_multi(dict(
(gen_key, mapping[key])
for gen_key, key
in zip(gen_keys, keys))
)
def get(*arg):
keys = key_generator(*arg)
return self.get_multi(keys)
def refresh(*arg):
keys = key_generator(*arg)
values = fn(*arg)
if asdict:
self.set_multi(
dict(zip(keys, [values[a] for a in arg]))
)
return values
else:
self.set_multi(
dict(zip(keys, values))
)
return values
decorate.set = set_
decorate.invalidate = invalidate
decorate.refresh = refresh
decorate.get = get
return decorate
return decorator
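# Usage sketch for cache_multi_on_arguments(), assuming a configured region;
# load_values() and fetch() are hypothetical. With asdict=True the decorated
# function returns a dict keyed by the original arguments; otherwise it
# returns a list in argument order.
#
#   @region.cache_multi_on_arguments(namespace="demo", asdict=True)
#   def load_values(*keys):
#       return {k: fetch(k) for k in keys}
#
#   load_values("k1", "k2", "k3")            # dict of cached/fresh values
#   load_values.invalidate("k1", "k2")
#   load_values.set({"k1": "v1", "k2": "v2"})
#   load_values.refresh("k1", "k2", "k3")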
def make_region(*arg, **kw):
"""Instantiate a new :class:`.CacheRegion`.
Currently, :func:`.make_region` is a passthrough
to :class:`.CacheRegion`. See that class for
constructor arguments.
"""
return CacheRegion(*arg, **kw) | en | 0.708371 | # noqa An integer placed in the :class:`.CachedValue` so that new versions of dogpile.cache can detect cached values from a previous, backwards-incompatible version. Region invalidation strategy interface Implement this interface and pass implementation instance to :meth:`.CacheRegion.configure` to override default region invalidation. Example:: class CustomInvalidationStrategy(RegionInvalidationStrategy): def __init__(self): self._soft_invalidated = None self._hard_invalidated = None def invalidate(self, hard=None): if hard: self._soft_invalidated = None self._hard_invalidated = time.time() else: self._soft_invalidated = time.time() self._hard_invalidated = None def is_invalidated(self, timestamp): return ((self._soft_invalidated and timestamp < self._soft_invalidated) or (self._hard_invalidated and timestamp < self._hard_invalidated)) def was_hard_invalidated(self): return bool(self._hard_invalidated) def is_hard_invalidated(self, timestamp): return (self._hard_invalidated and timestamp < self._hard_invalidated) def was_soft_invalidated(self): return bool(self._soft_invalidated) def is_soft_invalidated(self, timestamp): return (self._soft_invalidated and timestamp < self._soft_invalidated) The custom implementation is injected into a :class:`.CacheRegion` at configure time using the :paramref:`.CacheRegion.configure.region_invalidator` parameter:: region = CacheRegion() region = region.configure(region_invalidator=CustomInvalidationStrategy()) Invalidation strategies that wish to have access to the :class:`.CacheRegion` itself should construct the invalidator given the region as an argument:: class MyInvalidator(RegionInvalidationStrategy): def __init__(self, region): self.region = region # ... # ... region = CacheRegion() region = region.configure(region_invalidator=MyInvalidator(region)) .. versionadded:: 0.6.2 .. seealso:: :paramref:`.CacheRegion.configure.region_invalidator` Region invalidation. :class:`.CacheRegion` propagated call. The default invalidation system works by setting a current timestamp (using ``time.time()``) to consider all older timestamps effectively invalidated. Check timestamp to determine if it was hard invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time and region is invalidated in hard mode. Check timestamp to determine if it was soft invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time and region is invalidated in soft mode. Check timestamp to determine if it was invalidated. :return: Boolean. True if ``timestamp`` is older than the last region invalidation time. Indicate the region was invalidated in soft mode. :return: Boolean. True if region was invalidated in soft mode. Indicate the region was invalidated in hard mode. :return: Boolean. True if region was invalidated in hard mode. A front end to a particular cache backend. :param name: Optional, a string name for the region. This isn't used internally but can be accessed via the ``.name`` parameter, helpful for configuring a region from a config file. :param function_key_generator: Optional. A function that will produce a "cache key" given a data creation function and arguments, when using the :meth:`.CacheRegion.cache_on_arguments` method. The structure of this function should be two levels: given the data creation function, return a new function that generates the key based on the given arguments. 
Such as:: def my_key_generator(namespace, fn, **kw): fname = fn.__name__ def generate_key(*arg): return namespace + "_" + fname + "_".join(str(s) for s in arg) return generate_key region = make_region( function_key_generator = my_key_generator ).configure( "dogpile.cache.dbm", expiration_time=300, arguments={ "filename":"file.dbm" } ) The ``namespace`` is that passed to :meth:`.CacheRegion.cache_on_arguments`. It's not consulted outside this function, so in fact can be of any form. For example, it can be passed as a tuple, used to specify arguments to pluck from \**kw:: def my_key_generator(namespace, fn): def generate_key(*arg, **kw): return ":".join( [kw[k] for k in namespace] + [str(x) for x in arg] ) return generate_key Where the decorator might be used as:: @my_region.cache_on_arguments(namespace=('x', 'y')) def my_function(a, b, **kw): return my_data() .. seealso:: :func:`.function_key_generator` - default key generator :func:`.kwarg_function_key_generator` - optional gen that also uses keyword arguments :param function_multi_key_generator: Optional. Similar to ``function_key_generator`` parameter, but it's used in :meth:`.CacheRegion.cache_multi_on_arguments`. Generated function should return list of keys. For example:: def my_multi_key_generator(namespace, fn, **kw): namespace = fn.__name__ + (namespace or '') def generate_keys(*args): return [namespace + ':' + str(a) for a in args] return generate_keys :param key_mangler: Function which will be used on all incoming keys before passing to the backend. Defaults to ``None``, in which case the key mangling function recommended by the cache backend will be used. A typical mangler is the SHA1 mangler found at :func:`.sha1_mangle_key` which coerces keys into a SHA1 hash, so that the string length is fixed. To disable all key mangling, set to ``False``. Another typical mangler is the built-in Python function ``str``, which can be used to convert non-string or Unicode keys to bytestrings, which is needed when using a backend such as bsddb or dbm under Python 2.x in conjunction with Unicode keys. :param async_creation_runner: A callable that, when specified, will be passed to and called by dogpile.lock when there is a stale value present in the cache. It will be passed the mutex and is responsible releasing that mutex when finished. This can be used to defer the computation of expensive creator functions to later points in the future by way of, for example, a background thread, a long-running queue, or a task manager system like Celery. For a specific example using async_creation_runner, new values can be created in a background thread like so:: import threading def async_creation_runner(cache, somekey, creator, mutex): ''' Used by dogpile.core:Lock when appropriate ''' def runner(): try: value = creator() cache.set(somekey, value) finally: mutex.release() thread = threading.Thread(target=runner) thread.start() region = make_region( async_creation_runner=async_creation_runner, ).configure( 'dogpile.cache.memcached', expiration_time=5, arguments={ 'url': '127.0.0.1:11211', 'distributed_lock': True, } ) Remember that the first request for a key with no associated value will always block; async_creator will not be invoked. However, subsequent requests for cached-but-expired values will still return promptly. They will be refreshed by whatever asynchronous means the provided async_creation_runner callable implements. By default the async_creation_runner is disabled and is set to ``None``. .. 
versionadded:: 0.4.2 added the async_creation_runner feature. Construct a new :class:`.CacheRegion`. Configure a :class:`.CacheRegion`. The :class:`.CacheRegion` itself is returned. :param backend: Required. This is the name of the :class:`.CacheBackend` to use, and is resolved by loading the class from the ``dogpile.cache`` entrypoint. :param expiration_time: Optional. The expiration time passed to the dogpile system. May be passed as an integer number of seconds, or as a ``datetime.timedelta`` value. .. versionadded 0.5.0 ``expiration_time`` may be optionally passed as a ``datetime.timedelta`` value. The :meth:`.CacheRegion.get_or_create` method as well as the :meth:`.CacheRegion.cache_on_arguments` decorator (though note: **not** the :meth:`.CacheRegion.get` method) will call upon the value creation function after this time period has passed since the last generation. :param arguments: Optional. The structure here is passed directly to the constructor of the :class:`.CacheBackend` in use, though is typically a dictionary. :param wrap: Optional. A list of :class:`.ProxyBackend` classes and/or instances, each of which will be applied in a chain to ultimately wrap the original backend, so that custom functionality augmentation can be applied. .. versionadded:: 0.5.0 .. seealso:: :ref:`changing_backend_behavior` :param replace_existing_backend: if True, the existing cache backend will be replaced. Without this flag, an exception is raised if a backend is already configured. .. versionadded:: 0.5.7 :param region_invalidator: Optional. Override default invalidation strategy with custom implementation of :class:`.RegionInvalidationStrategy`. .. versionadded:: 0.6.2 Takes a ProxyBackend instance or class and wraps the attached backend. # if we were passed a type rather than an instance then # initialize it. weakref-capable wrapper for threading.Lock Invalidate this :class:`.CacheRegion`. The default invalidation system works by setting a current timestamp (using ``time.time()``) representing the "minimum creation time" for a value. Any retrieved value whose creation time is prior to this timestamp is considered to be stale. It does not affect the data in the cache in any way, and is also local to this instance of :class:`.CacheRegion`. Once set, the invalidation time is honored by the :meth:`.CacheRegion.get_or_create`, :meth:`.CacheRegion.get_or_create_multi` and :meth:`.CacheRegion.get` methods. The method supports both "hard" and "soft" invalidation options. With "hard" invalidation, :meth:`.CacheRegion.get_or_create` will force an immediate regeneration of the value which all getters will wait for. With "soft" invalidation, subsequent getters will return the "old" value until the new one is available. Usage of "soft" invalidation requires that the region or the method is given a non-None expiration time. .. versionadded:: 0.3.0 :param hard: if True, cache values will all require immediate regeneration; dogpile logic won't be used. If False, the creation time of existing values will be pushed back before the expiration time so that a return+regen will be invoked. .. versionadded:: 0.5.1 Configure from a configuration dictionary and a prefix. 
Example:: local_region = make_region() memcached_region = make_region() # regions are ready to use for function # decorators, but not yet for actual caching # later, when config is available myconfig = { "cache.local.backend":"dogpile.cache.dbm", "cache.local.arguments.filename":"/path/to/dbmfile.dbm", "cache.memcached.backend":"dogpile.cache.pylibmc", "cache.memcached.arguments.url":"127.0.0.1, 10.0.0.1", } local_region.configure_from_config(myconfig, "cache.local.") memcached_region.configure_from_config(myconfig, "cache.memcached.") Return True if the backend has been configured via the :meth:`.CacheRegion.configure` method already. .. versionadded:: 0.5.1 Return a value from the cache, based on the given key. If the value is not present, the method returns the token ``NO_VALUE``. ``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. versionchanged:: 0.3.0 :meth:`.CacheRegion.get` now checks the value's creation time against the expiration time, rather than returning the value unconditionally. The method also interprets the cached value in terms of the current "invalidation" time as set by the :meth:`.invalidate` method. If a value is present, but its creation time is older than the current invalidation time, the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the invalidation time check. .. versionadded:: 0.3.0 Support for the :meth:`.CacheRegion.invalidate` method. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param expiration_time: Optional expiration time value which will supersede that configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.3.0 :param ignore_expiration: if ``True``, the value is returned from the cache if present, regardless of configured expiration times or whether or not :meth:`.invalidate` was called. .. versionadded:: 0.3.0 Return multiple values from the cache, based on the given keys. Returns values as a list matching the keys given. E.g.:: values = region.get_multi(["one", "two", "three"]) To convert values to a dictionary, use ``zip()``:: keys = ["one", "two", "three"] values = region.get_multi(keys) dictionary = dict(zip(keys, values)) Keys which aren't present in the list are returned as the ``NO_VALUE`` token. ``NO_VALUE`` evaluates to False, but is separate from ``None`` to distinguish between a cached value of ``None``. By default, the configured expiration time of the :class:`.CacheRegion`, or alternatively the expiration time supplied by the ``expiration_time`` argument, is tested against the creation time of the retrieved value versus the current time (as reported by ``time.time()``). If stale, the cached value is ignored and the ``NO_VALUE`` token is returned. Passing the flag ``ignore_expiration=True`` bypasses the expiration time check. .. 
versionadded:: 0.5.0 Return a cached value based on the given key. If the value does not exist or is considered to be expired based on its creation time, the given creation function may or may not be used to recreate the value and persist the newly generated value in the cache. Whether or not the function is used depends on if the *dogpile lock* can be acquired or not. If it can't, it means a different thread or process is already running a creation function for this key against the cache. When the dogpile lock cannot be acquired, the method will block if no previous value is available, until the lock is released and a new value available. If a previous value is available, that value is returned immediately without blocking. If the :meth:`.invalidate` method has been called, and the retrieved value's timestamp is older than the invalidation timestamp, the value is unconditionally prevented from being returned. The method will attempt to acquire the dogpile lock to generate a new value, or will wait until the lock is released to return the new value. .. versionchanged:: 0.3.0 The value is unconditionally regenerated if the creation time is older than the last call to :meth:`.invalidate`. :param key: Key to be retrieved. While it's typical for a key to be a string, it is ultimately passed directly down to the cache backend, before being optionally processed by the key_mangler function, so can be of any type recognized by the backend or by the key_mangler function, if present. :param creator: function which creates a new value. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive the value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. E.g.:: def dont_cache_none(value): return value is not None value = region.get_or_create("some key", create_value, should_cache_fn=dont_cache_none) Above, the function returns the value of create_value() if the cache is invalid, however if the return value is None, it won't be cached. .. versionadded:: 0.4.3 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` - applies :meth:`.get_or_create` to any function using a decorator. :meth:`.CacheRegion.get_or_create_multi` - multiple key/value version Return a sequence of cached values based on a sequence of keys. The behavior for generation of values based on keys corresponds to that of :meth:`.Region.get_or_create`, with the exception that the ``creator()`` function may be asked to generate any subset of the given keys. The list of keys to be generated is passed to ``creator()``, and ``creator()`` should return the generated values as a sequence corresponding to the order of the keys. The method uses the same approach as :meth:`.Region.get_multi` and :meth:`.Region.set_multi` to get and set values from the backend. If you are using a :class:`.CacheBackend` or :class:`.ProxyBackend` that modifies values, take note this function invokes ``.set_multi()`` for newly generated values using the same values it returns to the calling function. A correct implementation of ``.set_multi()`` will not modify values in-place on the submitted ``mapping`` dict. :param keys: Sequence of keys to be retrieved. 
:param creator: function which accepts a sequence of keys and returns a sequence of new values. :param expiration_time: optional expiration time which will overide the expiration time already configured on this :class:`.CacheRegion` if not None. To set no expiration, use the value -1. :param should_cache_fn: optional callable function which will receive each value returned by the "creator", and will then return True or False, indicating if the value should actually be cached or not. If it returns False, the value is still returned, but isn't cached. .. versionadded:: 0.5.0 .. seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` # dogpile.core understands a 0 here as # "the value is not available", e.g. # _has_value() will return False. # sort the keys, the idea is to prevent deadlocks. # though haven't been able to simulate one anyway. Return a :class:`.CachedValue` given a value. Place a new value in the cache under the given key. Place new values in the cache under the given keys. .. versionadded:: 0.5.0 Remove a value from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) Remove multiple values from the cache. This operation is idempotent (can be called multiple times, or on a non-existent key, safely) .. versionadded:: 0.5.0 A function decorator that will cache the return value of the function using a key derived from the function itself and its arguments. The decorator internally makes use of the :meth:`.CacheRegion.get_or_create` method to access the cache and conditionally call the function. See that method for additional behavioral details. E.g.:: @someregion.cache_on_arguments() def generate_something(x, y): return somedatabase.query(x, y) The decorated function can then be called normally, where data will be pulled from the cache region unless a new value is needed:: result = generate_something(5, 6) The function is also given an attribute ``invalidate()``, which provides for invalidation of the value. Pass to ``invalidate()`` the same arguments you'd pass to the function itself to represent a particular value:: generate_something.invalidate(5, 6) Another attribute ``set()`` is added to provide extra caching possibilities relative to the function. This is a convenience method for :meth:`.CacheRegion.set` which will store a given value directly without calling the decorated function. The value to be cached is passed as the first argument, and the arguments which would normally be passed to the function should follow:: generate_something.set(3, 5, 6) The above example is equivalent to calling ``generate_something(5, 6)``, if the function were to produce the value ``3`` as the value to be cached. .. versionadded:: 0.4.1 Added ``set()`` method to decorated function. Similar to ``set()`` is ``refresh()``. This attribute will invoke the decorated function and populate a new value into the cache with the new value, as well as returning that value:: newvalue = generate_something.refresh(5, 6) .. versionadded:: 0.5.0 Added ``refresh()`` method to decorated function. Lastly, the ``get()`` method returns either the value cached for the given key, or the token ``NO_VALUE`` if no such key exists:: value = generate_something.get(5, 6) .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. The default key generation will use the name of the function, the module name for the function, the arguments passed, as well as an optional "namespace" parameter in order to generate a cache key. 
Given a function ``one`` inside the module ``myapp.tools``:: @region.cache_on_arguments(namespace="foo") def one(a, b): return a + b Above, calling ``one(3, 4)`` will produce a cache key as follows:: myapp.tools:one|foo|3 4 The key generator will ignore an initial argument of ``self`` or ``cls``, making the decorator suitable (with caveats) for use with instance or class methods. Given the example:: class MyClass(object): @region.cache_on_arguments(namespace="foo") def one(self, a, b): return a + b The cache key above for ``MyClass().one(3, 4)`` will again produce the same cache key of ``myapp.tools:one|foo|3 4`` - the name ``self`` is skipped. The ``namespace`` parameter is optional, and is used normally to disambiguate two functions of the same name within the same module, as can occur when decorating instance or class methods as below:: class MyClass(object): @region.cache_on_arguments(namespace='MC') def somemethod(self, x, y): "" class MyOtherClass(object): @region.cache_on_arguments(namespace='MOC') def somemethod(self, x, y): "" Above, the ``namespace`` parameter disambiguates between ``somemethod`` on ``MyClass`` and ``MyOtherClass``. Python class declaration mechanics otherwise prevent the decorator from having awareness of the ``MyClass`` and ``MyOtherClass`` names, as the function is received by the decorator before it becomes an instance method. The function key generation can be entirely replaced on a per-region basis using the ``function_key_generator`` argument present on :func:`.make_region` and :class:`.CacheRegion`. If defaults to :func:`.function_key_generator`. :param namespace: optional string argument which will be established as part of the cache key. This may be needed to disambiguate functions of the same name within the same source file, such as those associated with classes - note that the decorator itself can't see the parent class on a function as the class is being declared. :param expiration_time: if not None, will override the normal expiration time. May be specified as a callable, taking no arguments, that returns a value to be used as the ``expiration_time``. This callable will be called whenever the decorated function itself is called, in caching or retrieving. Thus, this can be used to determine a *dynamic* expiration time for the cached function result. Example use cases include "cache the result until the end of the day, week or time period" and "cache until a certain date or time passes". .. versionchanged:: 0.5.0 ``expiration_time`` may be passed as a callable to :meth:`.CacheRegion.cache_on_arguments`. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create`. .. versionadded:: 0.4.3 :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_key_generator: a function that will produce a "cache key". This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. seealso:: :meth:`.CacheRegion.cache_multi_on_arguments` :meth:`.CacheRegion.get_or_create` # backwards compatible A function decorator that will cache multiple return values from the function using a sequence of keys derived from the function itself and the arguments passed to it. 
This method is the "multiple key" analogue to the :meth:`.CacheRegion.cache_on_arguments` method. Example:: @someregion.cache_multi_on_arguments() def generate_something(*keys): return [ somedatabase.query(key) for key in keys ] The decorated function can be called normally. The decorator will produce a list of cache keys using a mechanism similar to that of :meth:`.CacheRegion.cache_on_arguments`, combining the name of the function with the optional namespace and with the string form of each key. It will then consult the cache using the same mechanism as that of :meth:`.CacheRegion.get_multi` to retrieve all current values; the originally passed keys corresponding to those values which aren't generated or need regeneration will be assembled into a new argument list, and the decorated function is then called with that subset of arguments. The returned result is a list:: result = generate_something("key1", "key2", "key3") The decorator internally makes use of the :meth:`.CacheRegion.get_or_create_multi` method to access the cache and conditionally call the function. See that method for additional behavioral details. Unlike the :meth:`.CacheRegion.cache_on_arguments` method, :meth:`.CacheRegion.cache_multi_on_arguments` works only with a single function signature, one which takes a simple list of keys as arguments. Like :meth:`.CacheRegion.cache_on_arguments`, the decorated function is also provided with a ``set()`` method, which here accepts a mapping of keys and values to set in the cache:: generate_something.set({"k1": "value1", "k2": "value2", "k3": "value3"}) ...an ``invalidate()`` method, which has the effect of deleting the given sequence of keys using the same mechanism as that of :meth:`.CacheRegion.delete_multi`:: generate_something.invalidate("k1", "k2", "k3") ...a ``refresh()`` method, which will call the creation function, cache the new values, and return them:: values = generate_something.refresh("k1", "k2", "k3") ...and a ``get()`` method, which will return values based on the given arguments:: values = generate_something.get("k1", "k2", "k3") .. versionadded:: 0.5.3 Added ``get()`` method to decorated function. Parameters passed to :meth:`.CacheRegion.cache_multi_on_arguments` have the same meaning as those passed to :meth:`.CacheRegion.cache_on_arguments`. :param namespace: optional string argument which will be established as part of each cache key. :param expiration_time: if not None, will override the normal expiration time. May be passed as an integer or a callable. :param should_cache_fn: passed to :meth:`.CacheRegion.get_or_create_multi`. This function is given a value as returned by the creator, and only if it returns True will that value be placed in the cache. :param asdict: if ``True``, the decorated function should return its result as a dictionary of keys->values, and the final result of calling the decorated function will also be a dictionary. If left at its default value of ``False``, the decorated function should return its result as a list of values, and the final result of calling the decorated function will also be a list. When ``asdict==True`` if the dictionary returned by the decorated function is missing keys, those keys will not be cached. :param to_str: callable, will be called on each function argument in order to convert to a string. Defaults to ``str()``. 
If the function accepts non-ascii unicode arguments on Python 2.x, the ``unicode()`` builtin can be substituted, but note this will produce unicode cache keys which may require key mangling before reaching the cache. .. versionadded:: 0.5.0 :param function_multi_key_generator: a function that will produce a list of keys. This function will supersede the one configured on the :class:`.CacheRegion` itself. .. versionadded:: 0.5.5 .. seealso:: :meth:`.CacheRegion.cache_on_arguments` :meth:`.CacheRegion.get_or_create_multi` Instantiate a new :class:`.CacheRegion`. Currently, :func:`.make_region` is a passthrough to :class:`.CacheRegion`. See that class for constructor arguments. | 2.248022 | 2 |
projectparallelprogrammeren/codesimulatie.py | fury106/ProjectParallelProgrammeren | 0 | 494 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.codesimulatie
=================================================================
Deze module simuleert alles.
"""
import projectparallelprogrammeren
def simulatie():
"""
Deze functie voert alle versies uit zodat deze vergeleken kunnen worden qua timing.
"""
from importlib import import_module
for i in range(4):
#alle versies van de simulatie importeren en achtereenvolgens uitvoeren.
version = f"montecarlo_v{i}"
montecarlo = import_module(version)
montecarlo.simulatie(100,50) #Deze waarden dienen enkel als test
if __name__ == "__main__":
simulatie()
#eof
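# A hedged sketch of how the import_module() loop above resolves names,
# assuming montecarlo_v0..montecarlo_v3 are importable as top-level modules
# on sys.path; if they actually live inside the projectparallelprogrammeren
# package, the fully qualified name would be needed instead.
#
#   from importlib import import_module
#   montecarlo = import_module("montecarlo_v2")
#   # package-relative alternative (assumption):
#   # montecarlo = import_module("projectparallelprogrammeren.montecarlo_v2")
#   montecarlo.simulatie(100, 50)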
| # -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.codesimulatie
=================================================================
Deze module simuleert alles.
"""
import projectparallelprogrammeren
def simulatie():
"""
Deze functie voert alle versies uit zodat deze vergeleken kunnen worden qua timing.
"""
from importlib import import_module
for i in range(4):
#alle versies van de simulatie importeren en achtereenvolgens uitvoeren.
version = f"montecarlo_v{i}"
montecarlo = import_module(version)
montecarlo.simulatie(100,50) #Deze waarden dienen enkel als test
if __name__ == "__main__":
simulatie()
#eof | nl | 0.82451 | # -*- coding: utf-8 -*- Module projectparallelprogrammeren.codesimulatie ================================================================= Deze module simuleert alles. Deze functie voert alle versies uit zodat deze vergeleken kunnen worden qua timing. #alle versies van de simulatie importeren en achtereenvolgens uitvoeren. #Deze waarden dienen enkel als test #eof | 2.441456 | 2 |
test/test_aes.py | haruhi-dl/haruhi-dl | 32 | 495 | <gh_stars>10-100
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from haruhi_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
from haruhi_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64
# the encrypted data can be generated with 'devscripts/generate_aes_testdata.py'
class TestAES(unittest.TestCase):
def setUp(self):
self.key = self.iv = [0x20, 0x15] + 14 * [0]
self.secret_msg = b'Secret message goes here'
def test_encrypt(self):
msg = b'message'
key = list(range(16))
encrypted = aes_encrypt(bytes_to_intlist(msg), key)
decrypted = intlist_to_bytes(aes_decrypt(encrypted, key))
self.assertEqual(decrypted, msg)
def test_cbc_decrypt(self):
data = bytes_to_intlist(
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
)
decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
def test_cbc_encrypt(self):
data = bytes_to_intlist(self.secret_msg)
encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
self.assertEqual(
encrypted,
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd")
def test_decrypt_text(self):
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 16))
self.assertEqual(decrypted, self.secret_msg)
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 32))
self.assertEqual(decrypted, self.secret_msg)
if __name__ == '__main__':
unittest.main()
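# A small round-trip sketch of the helpers exercised above, assuming the
# haruhi_dl.aes functions operate on lists of byte values as the
# bytes_to_intlist()/intlist_to_bytes() conversions suggest. The 24-byte
# secret message is padded up to the next 16-byte AES block boundary, which
# appears to be why test_cbc_decrypt strips trailing b'\x08' padding bytes.
#
#   key = iv = [0x20, 0x15] + 14 * [0]
#   data = bytes_to_intlist(b'Secret message goes here')
#   blob = aes_cbc_encrypt(data, key, iv)
#   plain = intlist_to_bytes(aes_cbc_decrypt(blob, key, iv)).rstrip(b'\x08')
#   assert plain == b'Secret message goes here'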
| #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from haruhi_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
from haruhi_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64
# the encrypted data can be generated with 'devscripts/generate_aes_testdata.py'
class TestAES(unittest.TestCase):
def setUp(self):
self.key = self.iv = [0x20, 0x15] + 14 * [0]
self.secret_msg = b'Secret message goes here'
def test_encrypt(self):
msg = b'message'
key = list(range(16))
encrypted = aes_encrypt(bytes_to_intlist(msg), key)
decrypted = intlist_to_bytes(aes_decrypt(encrypted, key))
self.assertEqual(decrypted, msg)
def test_cbc_decrypt(self):
data = bytes_to_intlist(
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
)
decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
def test_cbc_encrypt(self):
data = bytes_to_intlist(self.secret_msg)
encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
self.assertEqual(
encrypted,
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd")
def test_decrypt_text(self):
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 16))
self.assertEqual(decrypted, self.secret_msg)
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 32))
self.assertEqual(decrypted, self.secret_msg)
if __name__ == '__main__':
unittest.main() | en | 0.460214 | #!/usr/bin/env python # Allow direct execution # the encrypted data can be generate with 'devscripts/generate_aes_testdata.py' | 2.428678 | 2 |
tests/unit/detection/test_detection_notebooks.py | titipakorn/computervision-recipes | 2 | 496 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# This test is based on the test suite implemented for Recommenders project
# https://github.com/Microsoft/Recommenders/tree/master/tests
import papermill as pm
import pytest
import scrapbook as sb
from utils_cv.common.data import unzip_url
from utils_cv.detection.data import Urls
# Unless manually modified, python3 should be
# the name of the current jupyter kernel
# that runs on the activated conda environment
KERNEL_NAME = "python3"
OUTPUT_NOTEBOOK = "output.ipynb"
@pytest.mark.notebooks
def test_00_notebook_run(detection_notebooks):
notebook_path = detection_notebooks["00"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(PM_VERSION=pm.__version__),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["detection_bounding_box"].data) > 0
@pytest.mark.gpu
@pytest.mark.notebooks
def test_01_notebook_run(detection_notebooks, tiny_od_data_path):
notebook_path = detection_notebooks["01"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_02_notebook_run(detection_notebooks, tiny_od_mask_data_path):
notebook_path = detection_notebooks["02"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_mask_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_03_notebook_run(
detection_notebooks, tiny_od_keypoint_data_path, tmp_session
):
notebook_path = detection_notebooks["03"]
data_path2 = unzip_url(
Urls.fridge_objects_keypoint_top_bottom_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
IM_SIZE=100,
EPOCHS=1,
DATA_PATH=tiny_od_keypoint_data_path,
DATA_PATH2=data_path2,
THRESHOLD=0.01,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["keypoints"].data) == len(
nb_output.scraps["bboxes"].data
)
@pytest.mark.gpu
@pytest.mark.notebooks
def test_12_notebook_run(
detection_notebooks, tiny_od_data_path, tiny_ic_negatives_path
):
notebook_path = detection_notebooks["12"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
NEG_DATA_PATH=tiny_ic_negatives_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["valid_accs"].data) == 1
assert 5 <= len(nb_output.scraps["hard_im_scores"].data) <= 10
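# The pattern used throughout these tests, sketched with hypothetical names:
# papermill executes a notebook with injected parameters, and scrapbook reads
# back the values the notebook recorded (typically via sb.glue()).
#
#   import papermill as pm
#   import scrapbook as sb
#
#   pm.execute_notebook(
#       "some_notebook.ipynb", "output.ipynb",
#       parameters=dict(EPOCHS=1, IM_SIZE=100),
#       kernel_name="python3")
#   nb = sb.read_notebook("output.ipynb")
#   nb.scraps["training_losses"].data   # value glued inside the notebook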
| # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# This test is based on the test suite implemented for Recommenders project
# https://github.com/Microsoft/Recommenders/tree/master/tests
import papermill as pm
import pytest
import scrapbook as sb
from utils_cv.common.data import unzip_url
from utils_cv.detection.data import Urls
# Unless manually modified, python3 should be
# the name of the current jupyter kernel
# that runs on the activated conda environment
KERNEL_NAME = "python3"
OUTPUT_NOTEBOOK = "output.ipynb"
@pytest.mark.notebooks
def test_00_notebook_run(detection_notebooks):
notebook_path = detection_notebooks["00"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(PM_VERSION=pm.__version__),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["detection_bounding_box"].data) > 0
@pytest.mark.gpu
@pytest.mark.notebooks
def test_01_notebook_run(detection_notebooks, tiny_od_data_path):
notebook_path = detection_notebooks["01"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_02_notebook_run(detection_notebooks, tiny_od_mask_data_path):
notebook_path = detection_notebooks["02"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_mask_data_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["training_losses"].data) > 0
training_aps = nb_output.scraps["training_average_precision"].data
assert len(training_aps) > 0
for d in training_aps:
assert isinstance(d, dict)
assert len(set([len(d) for d in training_aps])) == 1
@pytest.mark.gpu
@pytest.mark.notebooks
def test_03_notebook_run(
detection_notebooks, tiny_od_keypoint_data_path, tmp_session
):
notebook_path = detection_notebooks["03"]
data_path2 = unzip_url(
Urls.fridge_objects_keypoint_top_bottom_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
IM_SIZE=100,
EPOCHS=1,
DATA_PATH=tiny_od_keypoint_data_path,
DATA_PATH2=data_path2,
THRESHOLD=0.01,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["keypoints"].data) == len(
nb_output.scraps["bboxes"].data
)
@pytest.mark.gpu
@pytest.mark.notebooks
def test_12_notebook_run(
detection_notebooks, tiny_od_data_path, tiny_ic_negatives_path
):
notebook_path = detection_notebooks["12"]
pm.execute_notebook(
notebook_path,
OUTPUT_NOTEBOOK,
parameters=dict(
PM_VERSION=pm.__version__,
DATA_PATH=tiny_od_data_path,
NEG_DATA_PATH=tiny_ic_negatives_path,
EPOCHS=1,
IM_SIZE=100,
),
kernel_name=KERNEL_NAME,
)
nb_output = sb.read_notebook(OUTPUT_NOTEBOOK)
assert len(nb_output.scraps["valid_accs"].data) == 1
assert 5 <= len(nb_output.scraps["hard_im_scores"].data) <= 10
| en | 0.829078 | # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. # This test is based on the test suite implemented for Recommenders project # https://github.com/Microsoft/Recommenders/tree/master/tests # Unless manually modified, python3 should be # the name of the current jupyter kernel # that runs on the activated conda environment | 1.960817 | 2 |
configs/HDR/hdr/retinanet_r50_fpn_1x_coco_hdr_minmax_glob_gamma_2.py | ismailkocdemir/mmdetection | 0 | 497 | <gh_stars>0
_base_ = [
'../retinanet_r50_fpn_1x_coco.py',
'../../_base_/datasets/hdr_detection_minmax_glob_gamma.py',
]
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.0005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None) # dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[10])
runner = dict(
type='EpochBasedRunner', max_epochs=20)
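# Rough sketch of how the schedule above behaves under mmdetection's step
# policy (exact behaviour depends on the mmcv version in use, so treat this
# as an assumption): for the first 500 iterations the learning rate ramps
# linearly from lr * warmup_ratio up to lr, then stays at 0.0005 until
# epoch 10, where the step policy scales it down (by 0.1 by default) for the
# remaining epochs.
#
#   lr(iter) ~= 0.0005 * (0.001 + (1 - 0.001) * iter / 500)   # iter < 500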
| _base_ = [
'../retinanet_r50_fpn_1x_coco.py',
'../../_base_/datasets/hdr_detection_minmax_glob_gamma.py',
]
# optimizer
# lr is set for a batch size of 8
optimizer = dict(type='SGD', lr=0.0005, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None) # dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[10])
runner = dict(
type='EpochBasedRunner', max_epochs=20) | en | 0.637859 | # optimizer # lr is set for a batch size of 8 # dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy | 1.41861 | 1 |
dj_twitter_clone_app/core_config/settings/staging.py | ivanprytula/dj_demo_app | 0 | 498 | """Placeholder/template for staging envs."""
| """Placeholder/template for staging envs."""
| en | 0.459287 | Placeholder/template for staging envs. | 1.047625 | 1 |
tests/test_command.py | vandana-11/cognito | 0 | 499 | <reponame>vandana-11/cognito
from cognito.check import Check
from cognito.table import Table
import os
import pytest
import pandas as pd
import numpy as np
from os import path
from sklearn import preprocessing
| from cognito.check import Check
from cognito.table import Table
import os
import pytest
import pandas as pd
import numpy as np
from os import path
from sklearn import preprocessing | none | 1 | 1.164437 | 1 |