max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
pysrc/classifier.py | CrackerCat/xed | 1,261 | 2610 | #!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import re
import genutil
import codegen
def _emit_function(fe, isa_sets, name):
fo = codegen.function_object_t('xed_classify_{}'.format(name))
fo.add_arg('const xed_decoded_inst_t* d')
fo.add_code_eol(' const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d)')
# FIXME: 2017-07-14 optimization: could use a static array for faster checking, smaller code
switch = codegen.c_switch_generator_t('isa_set', fo)
isa_sets_sorted = sorted(isa_sets)
for c in isa_sets_sorted:
switch.add_case('XED_ISA_SET_{}'.format(c.upper()),[],do_break=False)
if len(isa_sets) > 0:
switch.add('return 1;')
switch.add_default(['return 0;'], do_break=False)
switch.finish()
fo.emit_file_emitter(fe)
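# Illustrative sketch of the C function emitted above (an assumption inferred from
# the codegen calls; the exact signature, return type and case list come from the build):
#
#   xed_classify_avx512(const xed_decoded_inst_t* d) {
#       const xed_isa_set_enum_t isa_set = xed_decoded_inst_get_isa_set(d);
#       switch(isa_set) {
#           case XED_ISA_SET_...:   /* one case per collected isa_set */
#               return 1;
#           default:
#               return 0;
#       }
#   }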
def work(agi):
sse_isa_sets = set([])
avx_isa_sets = set([])
avx512_isa_sets = set([])
avx512_kmask_op = set([])
for generator in agi.generator_list:
for ii in generator.parser_output.instructions:
if genutil.field_check(ii, 'iclass'):
if re.search('AVX512',ii.isa_set):
avx512_isa_sets.add(ii.isa_set)
if re.search('KOP',ii.isa_set):
avx512_kmask_op.add(ii.isa_set)
elif re.search('AVX',ii.isa_set) or ii.isa_set in ['F16C', 'FMA']:
avx_isa_sets.add(ii.isa_set)
elif re.search('SSE',ii.isa_set) or ii.isa_set in ['AES','PCLMULQDQ']:
# Exclude MMX instructions that come in with SSE2 &
# SSSE3. The several purely MMX instr in SSE are
# "SSE-opcodes" with memop operands. One can look for
# those with SSE2MMX and SSSE3MMX xed isa_sets.
#
# Also exclude the SSE_PREFETCH operations; Those are
# just memops.
if (not re.search('MMX',ii.isa_set) and not re.search('PREFETCH',ii.isa_set)
and not re.search('X87',ii.isa_set) and not re.search('MWAIT',ii.isa_set)):
sse_isa_sets.add(ii.isa_set)
fe = agi.open_file('xed-classifiers.c') # xed_file_emitter_t
_emit_function(fe, avx512_isa_sets, 'avx512')
_emit_function(fe, avx512_kmask_op, 'avx512_maskop')
_emit_function(fe, avx_isa_sets, 'avx')
_emit_function(fe, sse_isa_sets, 'sse')
fe.close()
return
|
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py | rocheparadox/InvenTree | 656 | 2616 | """
Custom management command to rebuild thumbnail images
- May be required after importing a new dataset, for example
"""
import os
import logging
from PIL import UnidentifiedImageError
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.utils import OperationalError, ProgrammingError
from company.models import Company
from part.models import Part
logger = logging.getLogger("inventree-thumbnails")
class Command(BaseCommand):
"""
Rebuild all thumbnail images
"""
def rebuild_thumbnail(self, model):
"""
Rebuild the thumbnail specified by the "image" field of the provided model
"""
if not model.image:
return
img = model.image
url = img.thumbnail.name
loc = os.path.join(settings.MEDIA_ROOT, url)
if not os.path.exists(loc):
logger.info(f"Generating thumbnail image for '{img}'")
try:
model.image.render_variations(replace=False)
except FileNotFoundError:
logger.error(f"ERROR: Image file '{img}' is missing")
except UnidentifiedImageError:
logger.error(f"ERROR: Image file '{img}' is not a valid image")
def handle(self, *args, **kwargs):
logger.setLevel(logging.INFO)
logger.info("Rebuilding Part thumbnails")
for part in Part.objects.exclude(image=None):
try:
self.rebuild_thumbnail(part)
except (OperationalError, ProgrammingError):
logger.error("ERROR: Database read error.")
break
logger.info("Rebuilding Company thumbnails")
for company in Company.objects.exclude(image=None):
try:
self.rebuild_thumbnail(company)
except (OperationalError, ProgrammingError):
logger.error("ERROR: abase read error.")
break
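# Illustrative usage sketch (assuming the standard Django management-command entry
# point; the command name is taken from this module's file name):
#
#   python manage.py rebuild_thumbnails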
|
glue/core/data_factories/tables.py | rosteen/glue | 550 | 2620 | from glue.core.data_factories.helpers import has_extension
from glue.config import data_factory
__all__ = ['tabular_data']
@data_factory(label="ASCII Table",
identifier=has_extension('csv txt tsv tbl dat '
'csv.gz txt.gz tbl.bz '
'dat.gz'),
priority=1)
def tabular_data(path, **kwargs):
from glue.core.data_factories.astropy_table import astropy_tabular_data
from glue.core.data_factories.pandas import pandas_read_table
for fac in [astropy_tabular_data, pandas_read_table]:
try:
return fac(path, **kwargs)
except Exception:
pass
else:
raise IOError("Could not parse file: %s" % path)
|
tests/test_tree.py | andreax79/airflow-code-editor | 194 | 2642 | #!/usr/bin/env python
import os
import os.path
import airflow
import airflow.plugins_manager
from airflow import configuration
from flask import Flask
from unittest import TestCase, main
from airflow_code_editor.commons import PLUGIN_NAME
from airflow_code_editor.tree import (
get_tree,
)
assert airflow.plugins_manager
app = Flask(__name__)
class TestTree(TestCase):
def setUp(self):
self.root_dir = os.path.dirname(os.path.realpath(__file__))
configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False')
configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir)
def test_tree(self):
with app.app_context():
t = get_tree()
self.assertTrue(len(t) > 0)
self.assertTrue('git' in (x['id'] for x in t))
def test_tags(self):
with app.app_context():
t = get_tree("tags")
self.assertIsNotNone(t)
def test_local_branches(self):
with app.app_context():
t = get_tree("local-branches")
self.assertIsNotNone(t)
def test_remote_branches(self):
with app.app_context():
t = get_tree("remote-branches")
self.assertIsNotNone(t)
def test_files(self):
with app.app_context():
t = get_tree("files")
self.assertTrue(
len([x.get('id') for x in t if x.get('id') == 'test_utils.py']) == 1
)
t = get_tree("files/folder")
self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1']) == 1)
def test_git(self):
with app.app_context():
t = get_tree("git/HEAD")
self.assertTrue(t is not None)
class TestTreeGitDisabled(TestCase):
def setUp(self):
self.root_dir = os.path.dirname(os.path.realpath(__file__))
configuration.conf.set(PLUGIN_NAME, 'git_init_repo', 'False')
configuration.conf.set(PLUGIN_NAME, 'root_directory', self.root_dir)
configuration.conf.set(PLUGIN_NAME, 'git_enabled', 'False')
def test_tree(self):
with app.app_context():
t = get_tree()
self.assertTrue(len(t) > 0)
self.assertTrue('git' not in (x['id'] for x in t))
t = get_tree("tags")
self.assertEqual(t, [])
t = get_tree("local-branches")
self.assertEqual(t, [])
t = get_tree("remote-branches")
self.assertEqual(t, [])
t = get_tree("files")
self.assertTrue(
len([x.get('id') for x in t if x.get('id') == 'test_utils.py']) == 1
)
t = get_tree("files/folder")
self.assertTrue(len([x.get('id') for x in t if x.get('id') == '1']) == 1)
if __name__ == '__main__':
main()
|
src/moduels/gui/Tab_Help.py | HaujetZhao/Caps_Writer | 234 | 2653 | # -*- coding: UTF-8 -*-
from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout
from PySide2.QtCore import Signal
from moduels.component.NormalValue import 常量
from moduels.component.SponsorDialog import SponsorDialog
import os, webbrowser
class Tab_Help(QWidget):
状态栏消息 = Signal(str, int)
def __init__(self):
super().__init__()
        self.initElement()  # First initialize the widgets
        self.initSlots()  # Then connect the widgets to their signal slots
        self.initLayout()  # Then lay them out
        self.initValue()  # Finally set the widgets' values
def initElement(self):
self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档'))
self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记'))
self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程'))
self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本'))
self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本'))
self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群'))
self.tipButton = QPushButton(self.tr('打赏作者'))
self.masterLayout = QVBoxLayout()
def initSlots(self):
self.打开帮助按钮.clicked.connect(self.openHelpDocument)
self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))
self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))
self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))
self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))
self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open(
self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))
self.tipButton.clicked.connect(lambda: SponsorDialog(self))
def initLayout(self):
self.setLayout(self.masterLayout)
# self.masterLayout.addWidget(self.打开帮助按钮)
# self.masterLayout.addWidget(self.ffmpegMannualNoteButton)
self.masterLayout.addWidget(self.openVideoHelpButtone)
self.masterLayout.addWidget(self.openGiteePage)
self.masterLayout.addWidget(self.openGithubPage)
self.masterLayout.addWidget(self.linkToDiscussPage)
self.masterLayout.addWidget(self.tipButton)
def initValue(self):
self.打开帮助按钮.setMaximumHeight(100)
self.ffmpegMannualNoteButton.setMaximumHeight(100)
self.openVideoHelpButtone.setMaximumHeight(100)
self.openGiteePage.setMaximumHeight(100)
self.openGithubPage.setMaximumHeight(100)
self.linkToDiscussPage.setMaximumHeight(100)
self.tipButton.setMaximumHeight(100)
def openHelpDocument(self):
try:
if 常量.系统平台 == 'Darwin':
import shlex
os.system("open " + shlex.quote(self.tr("./misc/Docs/README_zh.html")))
elif 常量.系统平台 == 'Windows':
os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))
except:
print('未能打开帮助文档')
|
tests/unittests/command_parse/test_stream.py | itamarhaber/iredis | 1,857 | 2659 | def test_xrange(judge_command):
judge_command(
"XRANGE somestream - +",
{"command": "XRANGE", "key": "somestream", "stream_id": ["-", "+"]},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069"],
},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069-10",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069-10"],
},
)
judge_command(
"XRANGE somestream 1526985054069 1526985055069-10 count 10",
{
"command": "XRANGE",
"key": "somestream",
"stream_id": ["1526985054069", "1526985055069-10"],
"count_const": "count",
"count": "10",
},
)
def test_xgroup_create(judge_command):
judge_command(
"XGROUP CREATE mykey mygroup 123",
{
"command": "XGROUP",
"stream_create": "CREATE",
"key": "mykey",
"group": "mygroup",
"stream_id": "123",
},
)
judge_command(
"XGROUP CREATE mykey mygroup $",
{
"command": "XGROUP",
"stream_create": "CREATE",
"key": "mykey",
"group": "mygroup",
"stream_id": "$",
},
)
    # missing a parameter
judge_command("XGROUP CREATE mykey mygroup", None)
judge_command("XGROUP CREATE mykey", None)
def test_xgroup_setid(judge_command):
judge_command(
"XGROUP SETID mykey mygroup 123",
{
"command": "XGROUP",
"stream_setid": "SETID",
"key": "mykey",
"group": "mygroup",
"stream_id": "123",
},
)
judge_command(
"XGROUP SETID mykey mygroup $",
{
"command": "XGROUP",
"stream_setid": "SETID",
"key": "mykey",
"group": "mygroup",
"stream_id": "$",
},
)
    # two subcommands together shouldn't match
judge_command("XGROUP CREATE mykey mygroup 123 SETID mykey mygroup $", None)
def test_xgroup_destroy(judge_command):
judge_command(
"XGROUP destroy mykey mygroup",
{
"command": "XGROUP",
"stream_destroy": "destroy",
"key": "mykey",
"group": "mygroup",
},
)
judge_command("XGROUP destroy mykey", None)
judge_command("XGROUP DESTROY mykey mygroup $", None)
def test_xgroup_delconsumer(judge_command):
judge_command(
"XGROUP delconsumer mykey mygroup myconsumer",
{
"command": "XGROUP",
"stream_delconsumer": "delconsumer",
"key": "mykey",
"group": "mygroup",
"consumer": "myconsumer",
},
)
judge_command(
"XGROUP delconsumer mykey mygroup $",
{
"command": "XGROUP",
"stream_delconsumer": "delconsumer",
"key": "mykey",
"group": "mygroup",
"consumer": "$",
},
)
judge_command("XGROUP delconsumer mykey mygroup", None)
def test_xgroup_stream(judge_command):
judge_command(
"XACK mystream group1 123123",
{
"command": "XACK",
"key": "mystream",
"group": "group1",
"stream_id": "123123",
},
)
judge_command(
"XACK mystream group1 123123 111",
{"command": "XACK", "key": "mystream", "group": "group1", "stream_id": "111"},
)
def test_xinfo(judge_command):
judge_command(
"XINFO consumers mystream mygroup",
{
"command": "XINFO",
"stream_consumers": "consumers",
"key": "mystream",
"group": "mygroup",
},
)
judge_command(
"XINFO GROUPS mystream",
{"command": "XINFO", "stream_groups": "GROUPS", "key": "mystream"},
)
judge_command(
"XINFO STREAM mystream",
{"command": "XINFO", "stream": "STREAM", "key": "mystream"},
)
judge_command("XINFO HELP", {"command": "XINFO", "help": "HELP"})
judge_command("XINFO consumers mystream mygroup GROUPS mystream", None)
judge_command("XINFO groups mystream mygroup", None)
def test_xinfo_with_full(judge_command):
judge_command(
"XINFO STREAM mystream FULL",
{
"command": "XINFO",
"stream": "STREAM",
"key": "mystream",
"full_const": "FULL",
},
)
judge_command(
"XINFO STREAM mystream FULL count 10",
{
"command": "XINFO",
"stream": "STREAM",
"key": "mystream",
"full_const": "FULL",
"count_const": "count",
"count": "10",
},
)
def test_xpending(judge_command):
judge_command(
"XPENDING mystream group55",
{"command": "XPENDING", "key": "mystream", "group": "group55"},
)
judge_command(
"XPENDING mystream group55 myconsumer",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"consumer": "myconsumer",
},
)
judge_command(
"XPENDING mystream group55 - + 10",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"stream_id": ["-", "+"],
"count": "10",
},
)
judge_command(
"XPENDING mystream group55 - + 10 myconsumer",
{
"command": "XPENDING",
"key": "mystream",
"group": "group55",
"stream_id": ["-", "+"],
"count": "10",
"consumer": "myconsumer",
},
)
judge_command("XPENDING mystream group55 - + ", None)
def test_xadd(judge_command):
judge_command(
"xadd mystream MAXLEN ~ 1000 * key value",
{
"command": "xadd",
"key": "mystream",
"maxlen": "MAXLEN",
"approximately": "~",
"count": "1000",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
# test for MAXLEN option
judge_command(
"xadd mystream MAXLEN 1000 * key value",
{
"command": "xadd",
"key": "mystream",
"maxlen": "MAXLEN",
"count": "1000",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
judge_command(
"xadd mystream * key value",
{
"command": "xadd",
"key": "mystream",
"sfield": "key",
"svalue": "value",
"stream_id": "*",
},
)
    # specify stream id
judge_command(
"xadd mystream 123-123 key value",
{
"command": "xadd",
"key": "mystream",
"sfield": "key",
"svalue": "value",
"stream_id": "123-123",
},
)
judge_command(
"xadd mystream 123-123 key value foo bar hello world",
{
"command": "xadd",
"key": "mystream",
"sfield": "hello",
"svalue": "world",
"stream_id": "123-123",
},
)
def test_xtrim(judge_command):
judge_command(
" XTRIM mystream MAXLEN 2",
{"command": "XTRIM", "key": "mystream", "maxlen": "MAXLEN", "count": "2"},
)
judge_command(
" XTRIM mystream MAXLEN ~ 2",
{
"command": "XTRIM",
"key": "mystream",
"maxlen": "MAXLEN",
"count": "2",
"approximately": "~",
},
)
judge_command(" XTRIM mystream", None)
def test_xdel(judge_command):
judge_command(
"XDEL mystream 1581165000000 1549611229000 1581060831000",
{"command": "XDEL", "key": "mystream", "stream_id": "1581060831000"},
)
judge_command(
"XDEL mystream 1581165000000",
{"command": "XDEL", "key": "mystream", "stream_id": "1581165000000"},
)
def test_xclaim(judge_command):
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 123 456 789",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "789",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 IDEL 300",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": ["3600000", "300"],
"stream_id": "1526569498055-0",
"idel": "IDEL",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 retrycount 7",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"retrycount": "retrycount",
"count": "7",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 TIME 123456789",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"time": "TIME",
"timestamp": "123456789",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 FORCE",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"force": "FORCE",
},
)
judge_command(
"XCLAIM mystream mygroup Alice 3600000 1526569498055-0 JUSTID",
{
"command": "XCLAIM",
"key": "mystream",
"group": "mygroup",
"consumer": "Alice",
"millisecond": "3600000",
"stream_id": "1526569498055-0",
"justid": "JUSTID",
},
)
def test_xread(judge_command):
judge_command(
"XREAD COUNT 2 STREAMS mystream writers 0-0 0-0",
{
"command": "XREAD",
"count_const": "COUNT",
"count": "2",
"streams": "STREAMS",
# FIXME current grammar can't support multiple tokens
            # so the ids will be recognized as keys.
"keys": "mystream writers 0-0",
"stream_id": "0-0",
},
)
judge_command(
"XREAD COUNT 2 BLOCK 1000 STREAMS mystream writers 0-0 0-0",
{
"command": "XREAD",
"count_const": "COUNT",
"count": "2",
"streams": "STREAMS",
"keys": "mystream writers 0-0",
"block": "BLOCK",
"millisecond": "1000",
"stream_id": "0-0",
},
)
def test_xreadgroup(judge_command):
judge_command(
"XREADGROUP GROUP mygroup1 Bob COUNT 1 BLOCK 100 NOACK STREAMS key1 1 key2 2",
{
"command": "XREADGROUP",
"stream_group": "GROUP",
"group": "mygroup1",
"consumer": "Bob",
"count_const": "COUNT",
"count": "1",
"block": "BLOCK",
"millisecond": "100",
"noack": "NOACK",
"streams": "STREAMS",
"keys": "key1 1 key2",
"stream_id": "2",
},
)
judge_command(
"XREADGROUP GROUP mygroup1 Bob STREAMS key1 1 key2 2",
{
"command": "XREADGROUP",
"stream_group": "GROUP",
"group": "mygroup1",
"consumer": "Bob",
"streams": "STREAMS",
"keys": "key1 1 key2",
"stream_id": "2",
},
)
judge_command("XREADGROUP GROUP group consumer", None)
|
Giveme5W1H/extractor/tools/key_value_cache.py | bkrrr/Giveme5W | 410 | 2671 | import logging
import os
import pickle
import sys
import threading
import time
from typing import List
from Giveme5W1H.extractor.root import path
from Giveme5W1H.extractor.tools.util import bytes_2_human_readable
class KeyValueCache(object):
def __init__(self, cache_path):
"""
:param cache_path: path to cache, must be relative to the root.py file
"""
self.log = logging.getLogger('GiveMe5W')
# resolve path relative to the path file
self._cache_path = path(cache_path)
# ad a meaningful extension
self._cache_path = self._cache_path + '.prickle'
self._cache = {}
if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0:
# reload cache object form disc, if any
with open(self._cache_path, 'rb') as ff:
self._cache = pickle.load(ff)
self.log.debug('KeyValueCache: ' + self._cache_path + ' restored')
self.log_stats()
else:
self._cache = {}
self._lock = threading.Lock()
def log_stats(self):
        # size does not account for child objects
self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable(
sys.getsizeof(self._cache)))
def persist(self):
with open(self._cache_path, 'wb') as f:
pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL)
def cache(self, key: str, value: object):
"""
        None values are considered invalid results (ToughRequest produces None for exceptions).
        Set -1 if you want to store "no distance".
:param key:
:param value:
:return:
"""
self._lock.acquire()
if value is not None:
            self._cache[key] = self._pack(value)
self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': ' + str(value))
self.persist()
self._lock.release()
def get(self, key):
"""
Read cache entries
:param key:
:return:
"""
self._lock.acquire()
result = None
value = self._cache.get(key)
if value is not None:
self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': ' + str(value))
result = self._unpack(value)
self._lock.release()
return result
def get_complex(self, list_of_keys: List[str]):
"""
Read complex cache entries
"""
return self.get(self._get_id(list_of_keys))
def cache_complex(self, list_of_keys: List[str], value):
"""
helper to cache multi (string)key values.
They are sorted before concatenation, therefore an order is determined.
"""
self.cache(self._get_id(list_of_keys), value)
def _get_id(self, list_of_keys: List[str]):
"""
sorts list_of_keys, concatenates with # for readability
:param list_of_keys:
:return:
"""
        list_of_keys = sorted(list_of_keys)
return "#".join(list_of_keys)
def _pack(self, value):
"""
cache tracks the age of an entry, may be helpful in the future
:param value:
:return:
"""
return [value, str(time.time())]
def _unpack(self, value):
"""
removes the timestamp around the cached value, if any
:param value:
:return:
"""
# there are some old entries without timestamp
if isinstance(value, str) or isinstance(value, int):
return value
return value[0]
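# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of how this cache might be used; 'demo_cache' is a made-up
# cache name resolved relative to the project root, not something defined above.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    demo_cache = KeyValueCache('demo_cache')
    demo_cache.cache('berlin->munich', 584)              # stored and persisted to disk
    print(demo_cache.get('berlin->munich'))              # -> 584
    # multi-part keys are order-independent because _get_id sorts them
    demo_cache.cache_complex(['munich', 'berlin'], 584)
    print(demo_cache.get_complex(['berlin', 'munich']))  # -> 584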
|
train_dv3.py | drat/Neural-Voice-Cloning-With-Few-Samples | 361 | 2690 | """Training script for seq2seq text-to-speech synthesis model.
usage: train.py [options]
options:
--data-root=<dir> Directory contains preprocessed features.
--checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints].
    --hparams=<params>               Hyper parameters [default: ].
--checkpoint=<path> Restore model from checkpoint path if given.
--checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path.
--checkpoint-postnet=<path> Restore postnet model from checkpoint path.
--train-seq2seq-only Train only seq2seq model.
--train-postnet-only Train only postnet model.
--restore-parts=<path> Restore part of the model.
--log-event-path=<name> Log event path.
--reset-optimizer Reset optimizer.
--load-embedding=<path> Load embedding from checkpoint.
--speaker-id=<N> Use specific speaker of data in case for multi-speaker datasets.
-h, --help Show this help message and exit
"""
from docopt import docopt
import sys
from os.path import dirname, join
from tqdm import tqdm, trange
from datetime import datetime
# The deepvoice3 model
from dv3.deepvoice3_pytorch import frontend, builder
import dv3.audio
import dv3.lrschedule
import torch
from torch.utils import data as data_utils
from torch.autograd import Variable
from torch import nn
from torch import optim
import torch.backends.cudnn as cudnn
from torch.utils import data as data_utils
from torch.utils.data.sampler import Sampler
import numpy as np
from numba import jit
from nnmnkwii.datasets import FileSourceDataset, FileDataSource
from os.path import join, expanduser
import random
import librosa.display
from matplotlib import pyplot as plt
import sys
import os
from tensorboardX import SummaryWriter
from matplotlib import cm
from warnings import warn
from dv3.hparams import hparams, hparams_debug_string
fs = hparams.sample_rate
global_step = 0
global_epoch = 0
use_cuda = torch.cuda.is_available()
if use_cuda:
cudnn.benchmark = False
_frontend = None # to be set later
def _pad(seq, max_len, constant_values=0):
return np.pad(seq, (0, max_len - len(seq)),
mode='constant', constant_values=constant_values)
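# Example of the padding helper above (illustration only):
#   _pad([1, 2, 3], 5) -> array([1, 2, 3, 0, 0])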
def _pad_2d(x, max_len, b_pad=0):
x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],
mode="constant", constant_values=0)
return x
def plot_alignment(alignment, path, info=None):
fig, ax = plt.subplots()
im = ax.imshow(
alignment,
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.savefig(path, format='png')
plt.close()
class TextDataSource(FileDataSource):
def __init__(self, data_root, speaker_id=None):
self.data_root = data_root
self.speaker_ids = None
self.multi_speaker = False
# If not None, filter by speaker_id
self.speaker_id = speaker_id
def collect_files(self):
meta = join(self.data_root, "train.txt")
with open(meta, "rb") as f:
lines = f.readlines()
l = lines[0].decode("utf-8").split("|")
assert len(l) == 4 or len(l) == 5
self.multi_speaker = len(l) == 5
texts = list(map(lambda l: l.decode("utf-8").split("|")[3], lines))
if self.multi_speaker:
speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines))
# Filter by speaker_id
# using multi-speaker dataset as a single speaker dataset
if self.speaker_id is not None:
indices = np.array(speaker_ids) == self.speaker_id
texts = list(np.array(texts)[indices])
self.multi_speaker = False
return texts
return texts, speaker_ids
else:
return texts
def collect_features(self, *args):
if self.multi_speaker:
text, speaker_id = args
else:
text = args[0]
seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob)
if self.multi_speaker:
return np.asarray(seq, dtype=np.int32), int(speaker_id)
else:
return np.asarray(seq, dtype=np.int32)
class _NPYDataSource(FileDataSource):
def __init__(self, data_root, col, speaker_id=None):
self.data_root = data_root
self.col = col
self.frame_lengths = []
self.speaker_id = speaker_id
def collect_files(self):
meta = join(self.data_root, "train.txt")
with open(meta, "rb") as f:
lines = f.readlines()
l = lines[0].decode("utf-8").split("|")
assert len(l) == 4 or len(l) == 5
multi_speaker = len(l) == 5
self.frame_lengths = list(
map(lambda l: int(l.decode("utf-8").split("|")[2]), lines))
paths = list(map(lambda l: l.decode("utf-8").split("|")[self.col], lines))
paths = list(map(lambda f: join(self.data_root, f), paths))
if multi_speaker and self.speaker_id is not None:
speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines))
# Filter by speaker_id
# using multi-speaker dataset as a single speaker dataset
indices = np.array(speaker_ids) == self.speaker_id
paths = list(np.array(paths)[indices])
self.frame_lengths = list(np.array(self.frame_lengths)[indices])
# aha, need to cast numpy.int64 to int
self.frame_lengths = list(map(int, self.frame_lengths))
return paths
def collect_features(self, path):
return np.load(path)
class MelSpecDataSource(_NPYDataSource):
def __init__(self, data_root, speaker_id=None):
super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id)
class LinearSpecDataSource(_NPYDataSource):
def __init__(self, data_root, speaker_id=None):
super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id)
class PartialyRandomizedSimilarTimeLengthSampler(Sampler):
"""Partially randmoized sampler
1. Sort by lengths
2. Pick a small patch and randomize it
    3. Permute mini-batches
"""
def __init__(self, lengths, batch_size=16, batch_group_size=None,
permutate=True):
self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))
self.batch_size = batch_size
if batch_group_size is None:
batch_group_size = min(batch_size * 32, len(self.lengths))
if batch_group_size % batch_size != 0:
batch_group_size -= batch_group_size % batch_size
self.batch_group_size = batch_group_size
assert batch_group_size % batch_size == 0
self.permutate = permutate
def __iter__(self):
indices = self.sorted_indices.clone()
batch_group_size = self.batch_group_size
s, e = 0, 0
for i in range(len(indices) // batch_group_size):
s = i * batch_group_size
e = s + batch_group_size
random.shuffle(indices[s:e])
        # Permute batches
if self.permutate:
perm = np.arange(len(indices[:e]) // self.batch_size)
random.shuffle(perm)
indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1)
# Handle last elements
s += batch_group_size
if s < len(indices):
random.shuffle(indices[s:])
return iter(indices)
def __len__(self):
return len(self.sorted_indices)
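# --- Illustrative sketch (not part of the original training script) ---
# Tiny, made-up demonstration of the sampler above: indices of similarly sized
# items end up in the same mini-batch, which keeps padding waste small.
def _demo_length_sampler():
    lengths = [5, 100, 7, 98, 6, 101, 99, 8]  # hypothetical frame lengths
    sampler = PartialyRandomizedSimilarTimeLengthSampler(
        lengths, batch_size=2, batch_group_size=4)
    for index in sampler:
        print(int(index), lengths[int(index)])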
class PyTorchDataset(object):
def __init__(self, X, Mel, Y):
self.X = X
self.Mel = Mel
self.Y = Y
# alias
self.multi_speaker = X.file_data_source.multi_speaker
def __getitem__(self, idx):
if self.multi_speaker:
text, speaker_id = self.X[idx]
return text, self.Mel[idx], self.Y[idx], speaker_id
else:
return self.X[idx], self.Mel[idx], self.Y[idx]
def __len__(self):
return len(self.X)
def sequence_mask(sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_range_expand = Variable(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = sequence_length.unsqueeze(1) \
.expand_as(seq_range_expand)
return (seq_range_expand < seq_length_expand).float()
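# Illustration of sequence_mask (example values only):
#   sequence_mask(torch.LongTensor([2, 4]), max_len=4)
#   -> [[1., 1., 0., 0.],
#       [1., 1., 1., 1.]]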
class MaskedL1Loss(nn.Module):
def __init__(self):
super(MaskedL1Loss, self).__init__()
self.criterion = nn.L1Loss(size_average=False)
def forward(self, input, target, lengths=None, mask=None, max_len=None):
if lengths is None and mask is None:
raise RuntimeError("Should provide either lengths or mask")
# (B, T, 1)
if mask is None:
mask = sequence_mask(lengths, max_len).unsqueeze(-1)
# (B, T, D)
mask_ = mask.expand_as(input)
loss = self.criterion(input * mask_, target * mask_)
return loss / mask_.sum()
def collate_fn(batch):
"""Create batch"""
r = hparams.outputs_per_step
downsample_step = hparams.downsample_step
multi_speaker = len(batch[0]) == 4
# Lengths
input_lengths = [len(x[0]) for x in batch]
max_input_len = max(input_lengths)
target_lengths = [len(x[1]) for x in batch]
max_target_len = max(target_lengths)
if max_target_len % r != 0:
max_target_len += r - max_target_len % r
assert max_target_len % r == 0
if max_target_len % downsample_step != 0:
max_target_len += downsample_step - max_target_len % downsample_step
assert max_target_len % downsample_step == 0
# Set 0 for zero beginning padding
# imitates initial decoder states
b_pad = r
max_target_len += b_pad * downsample_step
a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int)
x_batch = torch.LongTensor(a)
input_lengths = torch.LongTensor(input_lengths)
target_lengths = torch.LongTensor(target_lengths)
b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch],
dtype=np.float32)
mel_batch = torch.FloatTensor(b)
c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch],
dtype=np.float32)
y_batch = torch.FloatTensor(c)
# text positions
text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len)
for x in batch], dtype=np.int)
text_positions = torch.LongTensor(text_positions)
max_decoder_target_len = max_target_len // r // downsample_step
# frame positions
s, e = 1, max_decoder_target_len + 1
# if b_pad > 0:
# s, e = s - 1, e - 1
frame_positions = torch.arange(s, e).long().unsqueeze(0).expand(
len(batch), max_decoder_target_len)
# done flags
done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1),
max_decoder_target_len, constant_values=1)
for x in batch])
done = torch.FloatTensor(done).unsqueeze(-1)
if multi_speaker:
speaker_ids = torch.LongTensor([x[3] for x in batch])
else:
speaker_ids = None
return x_batch, input_lengths, mel_batch, y_batch, \
(text_positions, frame_positions), done, target_lengths, speaker_ids
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
def save_alignment(path, attn):
plot_alignment(attn.T, path, info="{}, {}, step={}".format(
hparams.builder, time_string(), global_step))
def prepare_spec_image(spectrogram):
# [0, 1]
spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram))
spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis
return np.uint8(cm.magma(spectrogram.T) * 255)
def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):
    # hard coded
texts = [
"Scientists at the CERN laboratory say they have discovered a new particle.",
"There's a way to measure the acute emotional intelligence that has never gone out of style.",
"President Trump met with other leaders at the Group of 20 conference.",
"Generative adversarial network or variational auto-encoder.",
"Please call Stella.",
"Some have accepted this as a miracle without any physical explanation.",
]
    from dv3 import synthesis
synthesis._frontend = _frontend
eval_output_dir = join(checkpoint_dir, "eval")
os.makedirs(eval_output_dir, exist_ok=True)
# hard coded
speaker_ids = [0, 1, 10] if ismultispeaker else [None]
for speaker_id in speaker_ids:
speaker_str = "multispeaker{}".format(speaker_id) if speaker_id is not None else "single"
for idx, text in enumerate(texts):
signal, alignment, _, mel = synthesis.tts(
model, text, p=0, speaker_id=speaker_id, fast=False)
signal /= np.max(np.abs(signal))
# Alignment
path = join(eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
global_step, idx, speaker_str))
save_alignment(path, alignment)
tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# Mel
writer.add_image("(Eval) Predicted mel spectrogram text{}_{}".format(idx, speaker_str),
prepare_spec_image(mel), global_step)
# Audio
path = join(eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
global_step, idx, speaker_str))
dv3.audio.save_wav(signal, path)
try:
writer.add_audio("(Eval) Predicted audio signal {}_{}".format(idx, speaker_str),
signal, global_step, sample_rate=fs)
except Exception as e:
warn(str(e))
pass
def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y,
input_lengths, checkpoint_dir=None):
print("Save intermediate states at step {}".format(global_step))
# idx = np.random.randint(0, len(input_lengths))
idx = min(1, len(input_lengths) - 1)
input_length = input_lengths[idx]
# Alignment
# Multi-hop attention
if attn is not None and attn.dim() == 4:
for i, alignment in enumerate(attn):
alignment = alignment[idx].cpu().data.numpy()
tag = "alignment_layer{}".format(i + 1)
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# save files as well for now
alignment_dir = join(checkpoint_dir, "alignment_layer{}".format(i + 1))
os.makedirs(alignment_dir, exist_ok=True)
path = join(alignment_dir, "step{:09d}_layer_{}_alignment.png".format(
global_step, i + 1))
save_alignment(path, alignment)
# Save averaged alignment
alignment_dir = join(checkpoint_dir, "alignment_ave")
os.makedirs(alignment_dir, exist_ok=True)
path = join(alignment_dir, "step{:09d}_alignment.png".format(global_step))
alignment = attn.mean(0)[idx].cpu().data.numpy()
save_alignment(path, alignment)
tag = "averaged_alignment"
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# Predicted mel spectrogram
if mel_outputs is not None:
mel_output = mel_outputs[idx].cpu().data.numpy()
mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))
writer.add_image("Predicted mel spectrogram", mel_output, global_step)
# Predicted spectrogram
if linear_outputs is not None:
linear_output = linear_outputs[idx].cpu().data.numpy()
spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))
writer.add_image("Predicted linear spectrogram", spectrogram, global_step)
# Predicted audio signal
signal = dv3.audio.inv_spectrogram(linear_output.T)
signal /= np.max(np.abs(signal))
path = join(checkpoint_dir, "step{:09d}_predicted.wav".format(
global_step))
try:
writer.add_audio("Predicted audio signal", signal, global_step, sample_rate=fs)
except Exception as e:
warn(str(e))
pass
dv3.audio.save_wav(signal, path)
# Target mel spectrogram
if mel_outputs is not None:
mel_output = mel[idx].cpu().data.numpy()
mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))
writer.add_image("Target mel spectrogram", mel_output, global_step)
# Target spectrogram
if linear_outputs is not None:
linear_output = y[idx].cpu().data.numpy()
spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))
writer.add_image("Target linear spectrogram", spectrogram, global_step)
def logit(x, eps=1e-8):
return torch.log(x + eps) - torch.log(1 - x + eps)
def masked_mean(y, mask):
# (B, T, D)
mask_ = mask.expand_as(y)
return (y * mask_).sum() / mask_.sum()
def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0):
masked_l1 = MaskedL1Loss()
l1 = nn.L1Loss()
w = hparams.masked_loss_weight
# L1 loss
if w > 0:
assert mask is not None
l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat, y)
else:
assert mask is None
l1_loss = l1(y_hat, y)
# Priority L1 loss
if priority_bin is not None and priority_w > 0:
if w > 0:
priority_loss = w * masked_l1(
y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \
+ (1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])
else:
priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])
l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss
# Binary divergence loss
if hparams.binary_divergence_weight <= 0:
binary_div = Variable(y.data.new(1).zero_())
else:
y_hat_logits = logit(y_hat)
z = -y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits))
if w > 0:
binary_div = w * masked_mean(z, mask) + (1 - w) * z.mean()
else:
binary_div = z.mean()
return l1_loss, binary_div
@jit(nopython=True)
def guided_attention(N, max_N, T, max_T, g):
W = np.zeros((max_N, max_T), dtype=np.float32)
for n in range(N):
for t in range(T):
W[n, t] = 1 - np.exp(-(n / N - t / T)**2 / (2 * g * g))
return W
def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2):
B = len(input_lengths)
max_input_len = input_lengths.max()
W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32)
for b in range(B):
W[b] = guided_attention(input_lengths[b], max_input_len,
target_lengths[b], max_target_len, g).T
return W
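# --- Illustrative sketch (not part of the original training script) ---
# Example of the guided-attention masks built above; the lengths are made up.
# Entries near the diagonal n/N == t/T stay close to 0 while far-off entries
# approach 1, so off-diagonal attention is penalised by attn_loss below.
def _demo_guided_attention():
    input_lengths = np.array([10, 8])
    target_lengths = np.array([20, 15])
    W = guided_attentions(input_lengths, target_lengths, max_target_len=20)
    print(W.shape)       # (2, 20, 10): (batch, decoder steps, encoder steps)
    print(W[0, 0, 0])    # 0.0 on the diagonal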
def train(model, data_loader, optimizer, writer,
init_lr=0.002,
checkpoint_dir=None, checkpoint_interval=None, nepochs=None,
clip_thresh=1.0,
train_seq2seq=True, train_postnet=True):
if use_cuda:
model = model.cuda()
linear_dim = model.linear_dim
r = hparams.outputs_per_step
downsample_step = hparams.downsample_step
current_lr = init_lr
binary_criterion = nn.BCELoss()
assert train_seq2seq or train_postnet
global global_step, global_epoch
while global_epoch < nepochs:
running_loss = 0.
for step, (x, input_lengths, mel, y, positions, done, target_lengths,
speaker_ids) \
in tqdm(enumerate(data_loader)):
model.train()
ismultispeaker = speaker_ids is not None
# Learning rate schedule
if hparams.lr_schedule is not None:
lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule)
current_lr = lr_schedule_f(
init_lr, global_step, **hparams.lr_schedule_kwargs)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
optimizer.zero_grad()
# Used for Position encoding
text_positions, frame_positions = positions
# Downsample mel spectrogram
if downsample_step > 1:
mel = mel[:, 0::downsample_step, :].contiguous()
# Lengths
input_lengths = input_lengths.long().numpy()
decoder_lengths = target_lengths.long().numpy() // r // downsample_step
# Feed data
x, mel, y = Variable(x), Variable(mel), Variable(y)
text_positions = Variable(text_positions)
frame_positions = Variable(frame_positions)
done = Variable(done)
target_lengths = Variable(target_lengths)
speaker_ids = Variable(speaker_ids) if ismultispeaker else None
if use_cuda:
if train_seq2seq:
x = x.cuda()
text_positions = text_positions.cuda()
frame_positions = frame_positions.cuda()
if train_postnet:
y = y.cuda()
mel = mel.cuda()
done, target_lengths = done.cuda(), target_lengths.cuda()
speaker_ids = speaker_ids.cuda() if ismultispeaker else None
# Create mask if we use masked loss
if hparams.masked_loss_weight > 0:
# decoder output domain mask
decoder_target_mask = sequence_mask(
target_lengths / (r * downsample_step),
max_len=mel.size(1)).unsqueeze(-1)
if downsample_step > 1:
# spectrogram-domain mask
target_mask = sequence_mask(
target_lengths, max_len=y.size(1)).unsqueeze(-1)
else:
target_mask = decoder_target_mask
# shift mask
decoder_target_mask = decoder_target_mask[:, r:, :]
target_mask = target_mask[:, r:, :]
else:
decoder_target_mask, target_mask = None, None
# Apply model
if train_seq2seq and train_postnet:
mel_outputs, linear_outputs, attn, done_hat = model(
x, mel, speaker_ids=speaker_ids,
text_positions=text_positions, frame_positions=frame_positions,
input_lengths=input_lengths)
elif train_seq2seq:
assert speaker_ids is None
mel_outputs, attn, done_hat, _ = model.seq2seq(
x, mel,
text_positions=text_positions, frame_positions=frame_positions,
input_lengths=input_lengths)
# reshape
mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1))
linear_outputs = None
elif train_postnet:
assert speaker_ids is None
linear_outputs = model.postnet(mel)
mel_outputs, attn, done_hat = None, None, None
# Losses
w = hparams.binary_divergence_weight
# mel:
if train_seq2seq:
mel_l1_loss, mel_binary_div = spec_loss(
mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask)
mel_loss = (1 - w) * mel_l1_loss + w * mel_binary_div
# done:
if train_seq2seq:
done_loss = binary_criterion(done_hat, done)
# linear:
if train_postnet:
n_priority_freq = int(hparams.priority_freq / (fs * 0.5) * linear_dim)
linear_l1_loss, linear_binary_div = spec_loss(
linear_outputs[:, :-r, :], y[:, r:, :], target_mask,
priority_bin=n_priority_freq,
priority_w=hparams.priority_freq_weight)
linear_loss = (1 - w) * linear_l1_loss + w * linear_binary_div
# Combine losses
if train_seq2seq and train_postnet:
loss = mel_loss + linear_loss + done_loss
elif train_seq2seq:
loss = mel_loss + done_loss
elif train_postnet:
loss = linear_loss
# attention
if train_seq2seq and hparams.use_guided_attention:
soft_mask = guided_attentions(input_lengths, decoder_lengths,
attn.size(-2),
g=hparams.guided_attention_sigma)
soft_mask = Variable(torch.from_numpy(soft_mask))
soft_mask = soft_mask.cuda() if use_cuda else soft_mask
attn_loss = (attn * soft_mask).mean()
loss += attn_loss
if global_step > 0 and global_step % checkpoint_interval == 0:
save_states(
global_step, writer, mel_outputs, linear_outputs, attn,
mel, y, input_lengths, checkpoint_dir)
save_checkpoint(
model, optimizer, global_step, checkpoint_dir, global_epoch,
train_seq2seq, train_postnet)
if global_step > 0 and global_step % hparams.eval_interval == 0:
eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker)
# Update
loss.backward()
if clip_thresh > 0:
grad_norm = torch.nn.utils.clip_grad_norm(
model.get_trainable_parameters(), clip_thresh)
optimizer.step()
# Logs
writer.add_scalar("loss", float(loss.data[0]), global_step)
if train_seq2seq:
writer.add_scalar("done_loss", float(done_loss.data[0]), global_step)
writer.add_scalar("mel loss", float(mel_loss.data[0]), global_step)
writer.add_scalar("mel_l1_loss", float(mel_l1_loss.data[0]), global_step)
writer.add_scalar("mel_binary_div_loss", float(mel_binary_div.data[0]), global_step)
if train_postnet:
writer.add_scalar("linear_loss", float(linear_loss.data[0]), global_step)
writer.add_scalar("linear_l1_loss", float(linear_l1_loss.data[0]), global_step)
writer.add_scalar("linear_binary_div_loss", float(
linear_binary_div.data[0]), global_step)
if train_seq2seq and hparams.use_guided_attention:
writer.add_scalar("attn_loss", float(attn_loss.data[0]), global_step)
if clip_thresh > 0:
writer.add_scalar("gradient norm", grad_norm, global_step)
writer.add_scalar("learning rate", current_lr, global_step)
global_step += 1
running_loss += loss.data[0]
averaged_loss = running_loss / (len(data_loader))
writer.add_scalar("loss (per epoch)", averaged_loss, global_epoch)
print("Loss: {}".format(running_loss / (len(data_loader))))
global_epoch += 1
def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch,
train_seq2seq, train_postnet):
if train_seq2seq and train_postnet:
suffix = ""
m = model
elif train_seq2seq:
suffix = "_seq2seq"
m = model.seq2seq
elif train_postnet:
suffix = "_postnet"
m = model.postnet
checkpoint_path = join(
checkpoint_dir, "checkpoint_step{:09d}{}.pth".format(global_step, suffix))
optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None
torch.save({
"state_dict": m.state_dict(),
"optimizer": optimizer_state,
"global_step": step,
"global_epoch": epoch,
}, checkpoint_path)
print("Saved checkpoint:", checkpoint_path)
def build_model():
model = getattr(builder, hparams.builder)(
n_speakers=hparams.n_speakers,
speaker_embed_dim=hparams.speaker_embed_dim,
n_vocab=_frontend.n_vocab,
embed_dim=hparams.text_embed_dim,
mel_dim=hparams.num_mels,
linear_dim=hparams.fft_size // 2 + 1,
r=hparams.outputs_per_step,
downsample_step=hparams.downsample_step,
padding_idx=hparams.padding_idx,
dropout=hparams.dropout,
kernel_size=hparams.kernel_size,
encoder_channels=hparams.encoder_channels,
decoder_channels=hparams.decoder_channels,
converter_channels=hparams.converter_channels,
use_memory_mask=hparams.use_memory_mask,
trainable_positional_encodings=hparams.trainable_positional_encodings,
force_monotonic_attention=hparams.force_monotonic_attention,
use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input,
max_positions=hparams.max_positions,
speaker_embedding_weight_std=hparams.speaker_embedding_weight_std,
freeze_embedding=hparams.freeze_embedding,
window_ahead=hparams.window_ahead,
window_backward=hparams.window_backward,
key_projection=hparams.key_projection,
value_projection=hparams.value_projection,
)
return model
def load_checkpoint(path, model, optimizer, reset_optimizer):
global global_step
global global_epoch
print("Load checkpoint from: {}".format(path))
checkpoint = torch.load(path)
model.load_state_dict(checkpoint["state_dict"])
if not reset_optimizer:
optimizer_state = checkpoint["optimizer"]
if optimizer_state is not None:
print("Load optimizer state from {}".format(path))
optimizer.load_state_dict(checkpoint["optimizer"])
global_step = checkpoint["global_step"]
global_epoch = checkpoint["global_epoch"]
return model
def _load_embedding(path, model):
state = torch.load(path)["state_dict"]
key = "seq2seq.encoder.embed_tokens.weight"
model.seq2seq.encoder.embed_tokens.weight.data = state[key]
# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3
def restore_parts(path, model):
print("Restore part of the model from: {}".format(path))
state = torch.load(path)["state_dict"]
model_dict = model.state_dict()
valid_state_dict = {k: v for k, v in state.items() if k in model_dict}
model_dict.update(valid_state_dict)
model.load_state_dict(model_dict)
if __name__ == "__main__":
args = docopt(__doc__)
print("Command line args:\n", args)
checkpoint_dir = args["--checkpoint-dir"]
checkpoint_path = args["--checkpoint"]
checkpoint_seq2seq_path = args["--checkpoint-seq2seq"]
checkpoint_postnet_path = args["--checkpoint-postnet"]
load_embedding = args["--load-embedding"]
checkpoint_restore_parts = args["--restore-parts"]
speaker_id = args["--speaker-id"]
speaker_id = int(speaker_id) if speaker_id is not None else None
data_root = args["--data-root"]
if data_root is None:
data_root = join(dirname(__file__), "data", "ljspeech")
log_event_path = args["--log-event-path"]
reset_optimizer = args["--reset-optimizer"]
# Which model to be trained
train_seq2seq = args["--train-seq2seq-only"]
train_postnet = args["--train-postnet-only"]
# train both if not specified
if not train_seq2seq and not train_postnet:
print("Training whole model")
train_seq2seq, train_postnet = True, True
if train_seq2seq:
print("Training seq2seq model")
elif train_postnet:
print("Training postnet model")
else:
assert False, "must be specified wrong args"
# Override hyper parameters
hparams.parse(args["--hparams"])
print(hparams_debug_string())
assert hparams.name == "deepvoice3"
# Presets
if hparams.preset is not None and hparams.preset != "":
preset = hparams.presets[hparams.preset]
import json
hparams.parse_json(json.dumps(preset))
print("Override hyper parameters with preset \"{}\": {}".format(
hparams.preset, json.dumps(preset, indent=4)))
_frontend = getattr(frontend, hparams.frontend)
os.makedirs(checkpoint_dir, exist_ok=True)
# Input dataset definitions
X = FileSourceDataset(TextDataSource(data_root, speaker_id))
Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id))
Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id))
# Prepare sampler
frame_lengths = Mel.file_data_source.frame_lengths
sampler = PartialyRandomizedSimilarTimeLengthSampler(
frame_lengths, batch_size=hparams.batch_size)
# Dataset and Dataloader setup
dataset = PyTorchDataset(X, Mel, Y)
data_loader = data_utils.DataLoader(
dataset, batch_size=hparams.batch_size,
num_workers=hparams.num_workers, sampler=sampler,
collate_fn=collate_fn, pin_memory=hparams.pin_memory)
print("dataloader_prepared")
# Model
model = build_model()
if use_cuda:
model = model.cuda()
optimizer = optim.Adam(model.get_trainable_parameters(),
lr=hparams.initial_learning_rate, betas=(
hparams.adam_beta1, hparams.adam_beta2),
eps=hparams.adam_eps, weight_decay=hparams.weight_decay)
if checkpoint_restore_parts is not None:
restore_parts(checkpoint_restore_parts, model)
# Load checkpoints
if checkpoint_postnet_path is not None:
load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer)
if checkpoint_seq2seq_path is not None:
load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer)
if checkpoint_path is not None:
load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)
# Load embedding
if load_embedding is not None:
print("Loading embedding from {}".format(load_embedding))
_load_embedding(load_embedding, model)
# Setup summary writer for tensorboard
if log_event_path is None:
log_event_path = "log/run-test" + str(datetime.now()).replace(" ", "_")
print("Los event path: {}".format(log_event_path))
writer = SummaryWriter(log_dir=log_event_path)
# Train!
try:
train(model, data_loader, optimizer, writer,
init_lr=hparams.initial_learning_rate,
checkpoint_dir=checkpoint_dir,
checkpoint_interval=hparams.checkpoint_interval,
nepochs=hparams.nepochs,
clip_thresh=hparams.clip_thresh,
train_seq2seq=train_seq2seq, train_postnet=train_postnet)
except KeyboardInterrupt:
save_checkpoint(
model, optimizer, global_step, checkpoint_dir, global_epoch,
train_seq2seq, train_postnet)
print("Finished")
sys.exit(0)
|
Fusion/deltat.py | coylen/pySG | 264 | 2716 | # deltat.py time difference calculation for sensor fusion
# Released under the MIT License (MIT)
# Copyright (c) 2018 <NAME>
# Provides TimeDiff function and DeltaT class.
# The following notes cover special cases. Where the device performing fusion
# is linked to the IMU and is running MicroPython no special treatment is
# needed.
# The special cases are:
# 1. Device connected to the IMU is linked to a separate platform doing fusion.
# 2. Either or both are not running MicroPython.
# If the device providing the vectors is not running on MicroPython the user
# must supply timestamps and a function capable of differencing these. The
# function is passed to the Fusion constructor and the timestamp is provided
# along with the vector, being the time when the vector was acquired.
# If the device providing the vectors is running MicroPython but fusion is
# being performed on a device which is not, the user must provide their own
# implementation of ticks_diff which accounts for MicroPython rollover and
# must supply the returned ticks_us() values as a timestamp.
# Under MicroPython TimeDiff(start, end) uses time.ticks_diff.
# A DeltaT instance, called with function call syntax, returns a time
# difference from the previous call as a float value. Units seconds.
# If running under MicroPython and no time differencing function is supplied
# to the Fusion constructor it uses time.ticks_us as its time source and a
# default timediff function using time.ticks_diff() with a division by 1e6.
# If a time differencing function is supplied, a timestamp must be passed as an
# arg to instance calls of Fusion.update() or Fusion.update_nomag(). In the
# async version the user supplied read_coro() must return a timestamp with the
# vector.
# On 1st pass dt evidently can't be computed. A notional value of 100μs is
# returned. The Madgwick algorithm takes seconds to stabilise.
try:
import utime as time
except ImportError:
import time
is_micropython = hasattr(time, 'ticks_diff')
class DeltaT():
def __init__(self, timediff):
if timediff is None:
self.expect_ts = False
if is_micropython:
self.timediff = lambda start, end : time.ticks_diff(start, end)/1000000
else:
raise ValueError('You must define a timediff function')
else:
self.expect_ts = True
self.timediff = timediff
self.start_time = None
def __call__(self, ts):
if self.expect_ts:
if ts is None:
raise ValueError('Timestamp expected but not supplied.')
else:
if is_micropython:
ts = time.ticks_us()
else:
raise RuntimeError('Not MicroPython: provide timestamps and a timediff function')
# ts is now valid
if self.start_time is None: # 1st call: self.start_time is invalid
self.start_time = ts
return 0.0001 # 100μs notional delay. 1st reading is invalid in any case
dt = self.timediff(ts, self.start_time)
self.start_time = ts
return dt
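# --- Illustrative usage sketch (not part of the original module) ---
# Under standard CPython a timediff function and explicit timestamps must be
# supplied, as the notes above explain; time.time() is used here purely as an
# example timestamp source.
if __name__ == '__main__':
    deltat = DeltaT(lambda start, end: start - end)  # start is the newer timestamp
    print(deltat(time.time()))   # first call returns the notional 0.0001 s
    print(deltat(time.time()))   # later calls return the elapsed time in seconds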
|
fuzzers/ECP5/050-pio_routing/fuzzer.py | umarcor/prjtrellis | 256 | 2761 | from fuzzconfig import FuzzConfig
import interconnect
import nets
import pytrellis
import re
jobs = [
{
"pos": [(47, 0), (48, 0), (49, 0)],
"cfg": FuzzConfig(job="PIOROUTEL", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R47C0:PICL0", "MIB_R48C0:PICL1", "MIB_R49C0:PICL2"])
},
{
"pos": [(47, 90), (48, 90), (49, 90)],
"cfg": FuzzConfig(job="PIOROUTER", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R47C90:PICR0", "MIB_R48C90:PICR1", "MIB_R49C90:PICR2"])
},
{
"pos": [(0, 22), (1, 23), (0, 22), (1, 23)],
"cfg": FuzzConfig(job="PIOROUTET", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R0C22:PIOT0", "MIB_R0C23:PIOT1", "MIB_R1C22:PICT0", "MIB_R1C23:PICT1"])
},
{
"pos": [(71, 11), (71, 12), (70, 11), (70, 12)],
"cfg": FuzzConfig(job="PIOROUTET", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R71C11:PICB0", "MIB_R71C12:PICB1"])
},
{
"pos": [(71, 18), (70, 18)],
"cfg": FuzzConfig(job="PIOROUTESB", family="ECP5", device="LFE5U-45F", ncl="pioroute_spicb.ncl",
tiles=["MIB_R71C18:SPICB0"])
},
]
def main():
pytrellis.load_database("../../../database")
for job in jobs:
cfg = job["cfg"]
cfg.setup()
def nn_filter(net, netnames):
return not nets.is_cib(net)
orig_tiles = cfg.tiles
for pos in job["pos"]:
# Put fixed connections in the most appropriate tile
target_tile = None
for tile in orig_tiles:
if "R{}C{}".format(pos[0], pos[1]) in tile:
target_tile = tile
break
if target_tile is not None:
                cfg.tiles = [target_tile] + [_ for _ in orig_tiles if _ != target_tile]
else:
cfg.tiles = orig_tiles
interconnect.fuzz_interconnect(config=cfg, location=pos,
netname_predicate=nn_filter,
netname_filter_union=False,
func_cib=True)
if __name__ == "__main__":
main()
|
templates/integration/__init__.py | p7g/dd-trace-py | 308 | 2768 | """
The foo integration instruments the bar and baz features of the
foo library.
Enabling
~~~~~~~~
The foo integration is enabled automatically when using
:ref:`ddtrace-run <ddtracerun>` or :ref:`patch_all() <patch_all>`.
Or use :ref:`patch() <patch>` to manually enable the integration::
from ddtrace import patch
patch(foo=True)
Global Configuration
~~~~~~~~~~~~~~~~~~~~
.. py:data:: ddtrace.config.foo["service"]
The service name reported by default for foo instances.
This option can also be set with the ``DD_FOO_SERVICE`` environment
variable.
Default: ``"foo"``
Instance Configuration
~~~~~~~~~~~~~~~~~~~~~~
To configure the foo integration on an per-instance basis use the
``Pin`` API::
import foo
from ddtrace import Pin
myfoo = foo.Foo()
Pin.override(myfoo, service="myfoo")
"""
from ...internal.utils.importlib import require_modules
required_modules = ["foo"]
with require_modules(required_modules) as missing_modules:
if not missing_modules:
from .patch import patch
from .patch import unpatch
__all__ = ["patch", "unpatch"]
|
system/lib/update_musl.py | RyanCargan/emscripten | 6,541 | 2772 | #!/usr/bin/env python3
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Simple script for updating musl from external git repo.
The upstream sources, along with our local changes, live at:
https://github.com/emscripten-core/musl
To update musl first make sure all changes from the emscripten repo
are present in the `emscripten` branch of the above repo. Then run
`git merge v<musl_version>` to pull in the latest musl changes from
a given musl version. Once any merge conflicts are resolved, those
changes can then be copied back into emscripten using this script.
"""
import os
import sys
import shutil
import subprocess
script_dir = os.path.abspath(os.path.dirname(__file__))
local_src = os.path.join(script_dir, 'libc', 'musl')
exclude_dirs = (
# Top level directories we don't include
'tools', 'obj', 'lib', 'crt', 'musl', 'compat',
# Parts of src we don't build
'malloc',
# Arch-specific code we don't use
'arm', 'x32', 'sh', 'i386', 'x86_64', 'aarch64', 'riscv64',
's390x', 'mips', 'mips64', 'mipsn32', 'powerpc', 'powerpc64',
'm68k', 'microblaze', 'or1k', 'generic')
musl_dir = os.path.abspath(sys.argv[1])
def should_ignore(name):
return name in exclude_dirs or name[0] == '.'
def ignore(dirname, contents):
return [c for c in contents if should_ignore(c)]
def main():
assert os.path.exists(musl_dir)
# Remove old version
shutil.rmtree(local_src)
# Copy new version into place
shutil.copytree(musl_dir, local_src, ignore=ignore)
if __name__ == '__main__':
main()
|
lldb/test/API/lang/swift/optimized_code/bound_generic_enum/TestSwiftOptimizedBoundGenericEnum.py | LaudateCorpus1/llvm-project | 605 | 2781 | <gh_stars>100-1000
import lldb
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbtest as lldbtest
import lldbsuite.test.lldbutil as lldbutil
import os
import unittest2
class TestSwiftOptimizedBoundGenericEnum(lldbtest.TestBase):
mydir = lldbtest.TestBase.compute_mydir(__file__)
@swiftTest
def test(self):
"""Test the bound generic enum types in "optimized" code."""
self.build()
target, process, thread, bkpt = lldbutil.run_to_source_breakpoint(self,
'break one', lldb.SBFileSpec('main.swift'))
bkpt_two = target.BreakpointCreateBySourceRegex(
'break two', lldb.SBFileSpec('main.swift'))
self.assertGreater(bkpt_two.GetNumLocations(), 0)
var_self = self.frame().FindVariable("self")
# FIXME, this fails with a data extractor error.
lldbutil.check_variable(self, var_self, False, value=None)
lldbutil.continue_to_breakpoint(process, bkpt_two)
var_self = self.frame().FindVariable("self")
lldbutil.check_variable(self, var_self, True, value="success")
|
hi-ml-histopathology/src/histopathology/preprocessing/tiling.py | kumar-pratik/hi-ml | 402 | 2798 | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
# These tiling implementations are adapted from PANDA Kaggle solutions, for example:
# https://github.com/kentaroy47/Kaggle-PANDA-1st-place-solution/blob/master/src/data_process/a00_save_tiles.py
from typing import Any, Optional, Tuple
import numpy as np
def get_1d_padding(length: int, tile_size: int) -> Tuple[int, int]:
"""Computes symmetric padding for `length` to be divisible by `tile_size`."""
pad = (tile_size - length % tile_size) % tile_size
return (pad // 2, pad - pad // 2)
def pad_for_tiling_2d(array: np.ndarray, tile_size: int, channels_first: Optional[bool] = True,
**pad_kwargs: Any) -> Tuple[np.ndarray, np.ndarray]:
"""Symmetrically pads a 2D `array` such that both dimensions are divisible by `tile_size`.
:param array: 2D image array.
:param tile_size: Width/height of each tile in pixels.
:param channels_first: Whether `array` is in CHW (`True`, default) or HWC (`False`) layout.
:param pad_kwargs: Keyword arguments to be passed to `np.pad()` (e.g. `constant_values=0`).
:return: A tuple containing:
- `padded_array`: Resulting array, in the same CHW/HWC layout as the input.
- `offset`: XY offset introduced by the padding. Add this to coordinates relative to the
original array to obtain indices for the padded array.
"""
height, width = array.shape[1:] if channels_first else array.shape[:-1]
padding_h = get_1d_padding(height, tile_size)
padding_w = get_1d_padding(width, tile_size)
padding = [padding_h, padding_w]
channels_axis = 0 if channels_first else 2
padding.insert(channels_axis, (0, 0)) # zero padding on channels axis
padded_array = np.pad(array, padding, **pad_kwargs)
offset = (padding_w[0], padding_h[0])
return padded_array, np.array(offset)
def tile_array_2d(array: np.ndarray, tile_size: int, channels_first: Optional[bool] = True,
**pad_kwargs: Any) -> Tuple[np.ndarray, np.ndarray]:
"""Split an image array into square non-overlapping tiles.
The array will be padded symmetrically if its dimensions are not exact multiples of `tile_size`.
:param array: Image array.
:param tile_size: Width/height of each tile in pixels.
:param pad_kwargs: Keyword arguments to be passed to `np.pad()` (e.g. `constant_values=0`).
:param channels_first: Whether `array` is in CHW (`True`, default) or HWC (`False`) layout.
:return: A tuple containing:
- `tiles`: A batch of tiles in NCHW layout.
- `coords`: XY coordinates of each tile, in the same order.
"""
padded_array, (offset_w, offset_h) = pad_for_tiling_2d(array, tile_size, channels_first, **pad_kwargs)
if channels_first:
channels, height, width = padded_array.shape
else:
height, width, channels = padded_array.shape
n_tiles_h = height // tile_size
n_tiles_w = width // tile_size
if channels_first:
intermediate_shape = (channels, n_tiles_h, tile_size, n_tiles_w, tile_size)
axis_order = (1, 3, 0, 2, 4) # (n_tiles_h, n_tiles_w, channels, tile_size, tile_size)
output_shape = (n_tiles_h * n_tiles_w, channels, tile_size, tile_size)
else:
intermediate_shape = (n_tiles_h, tile_size, n_tiles_w, tile_size, channels)
axis_order = (0, 2, 1, 3, 4) # (n_tiles_h, n_tiles_w, tile_size, tile_size, channels)
output_shape = (n_tiles_h * n_tiles_w, tile_size, tile_size, channels)
tiles = padded_array.reshape(intermediate_shape) # Split width and height axes
tiles = tiles.transpose(axis_order)
tiles = tiles.reshape(output_shape) # Flatten tile batch dimension
# Compute top-left coordinates of every tile, relative to the original array's origin
coords_h = tile_size * np.arange(n_tiles_h) - offset_h
coords_w = tile_size * np.arange(n_tiles_w) - offset_w
# Shape: (n_tiles_h * n_tiles_w, 2)
coords = np.stack(np.meshgrid(coords_w, coords_h), axis=-1).reshape(-1, 2)
return tiles, coords
def assemble_tiles_2d(tiles: np.ndarray, coords: np.ndarray, fill_value: Optional[float] = np.nan,
channels_first: Optional[bool] = True) -> Tuple[np.ndarray, np.ndarray]:
"""Assembles a 2D array from sequences of tiles and coordinates.
:param tiles: Stack of tiles with batch dimension first.
:param coords: XY tile coordinates, assumed to be spaced by multiples of `tile_size` (shape: [N, 2]).
:param fill_value: Value to assign to empty elements (default: `NaN`).
:param channels_first: Whether each tile is in CHW (`True`, default) or HWC (`False`) layout.
:return: A tuple containing:
- `array`: The reassembled 2D array with the smallest dimensions to contain all given tiles.
        - `offset`: XY offset introduced by the assembly (the negated lowest XY tile coordinates). Add
          this to tile coordinates to obtain indices for the assembled array.
"""
if coords.shape[0] != tiles.shape[0]:
raise ValueError(f"Tile coordinates and values must have the same length, "
f"got {coords.shape[0]} and {tiles.shape[0]}")
if channels_first:
n_tiles, channels, tile_size, _ = tiles.shape
else:
n_tiles, tile_size, _, channels = tiles.shape
tile_xs, tile_ys = coords.T
x_min, x_max = min(tile_xs), max(tile_xs + tile_size)
y_min, y_max = min(tile_ys), max(tile_ys + tile_size)
width = x_max - x_min
height = y_max - y_min
output_shape = (channels, height, width) if channels_first else (height, width, channels)
array = np.full(output_shape, fill_value)
offset = np.array([-x_min, -y_min])
for idx in range(n_tiles):
row = coords[idx, 1] + offset[1]
col = coords[idx, 0] + offset[0]
if channels_first:
array[:, row:row + tile_size, col:col + tile_size] = tiles[idx]
else:
array[row:row + tile_size, col:col + tile_size, :] = tiles[idx]
return array, offset
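# Round-trip sketch (illustrative only, not part of the library): tile a CHW
# image whose sides are not multiples of the tile size, reassemble the tiles,
# then crop away the symmetric padding using the offset returned by assembly.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image = rng.random((3, 300, 250))  # CHW layout, 300x250 pixels
    tiles, coords = tile_array_2d(image, tile_size=128, constant_values=0)
    assembled, offset = assemble_tiles_2d(tiles, coords)
    # offset is (pad_left, pad_top); slice it off to recover the original image
    recovered = assembled[:, offset[1]:offset[1] + 300, offset[0]:offset[0] + 250]
    assert np.allclose(recovered, image)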
|
6.爬取豆瓣排行榜电影数据(含GUI界面版)/main.py | shengqiangzhang/examples-of-web-crawlers | 12,023 | 2800 | <reponame>shengqiangzhang/examples-of-web-crawlers
# -*- coding:utf-8 -*-
from uiObject import uiObject
# main入口
if __name__ == '__main__':
ui = uiObject()
ui.ui_process() |
doc/examples.py | Enerccio/mahjong | 254 | 2804 | <gh_stars>100-1000
from mahjong.hand_calculating.hand import HandCalculator
from mahjong.meld import Meld
from mahjong.hand_calculating.hand_config import HandConfig, OptionalRules
from mahjong.shanten import Shanten
from mahjong.tile import TilesConverter
calculator = HandCalculator()
# useful helper
def print_hand_result(hand_result):
print(hand_result.han, hand_result.fu)
print(hand_result.cost['main'])
print(hand_result.yaku)
for fu_item in hand_result.fu_details:
print(fu_item)
print('')
####################################################################
# Tanyao hand by ron #
####################################################################
# we had to use all 14 tiles in that array
tiles = TilesConverter.string_to_136_array(man='22444', pin='333567', sou='444')
win_tile = TilesConverter.string_to_136_array(sou='4')[0]
result = calculator.estimate_hand_value(tiles, win_tile)
print_hand_result(result)
####################################################################
# Tanyao hand by tsumo #
####################################################################
result = calculator.estimate_hand_value(tiles, win_tile, config=HandConfig(is_tsumo=True))
print_hand_result(result)
####################################################################
# Add open set to hand #
####################################################################
melds = [Meld(meld_type=Meld.PON, tiles=TilesConverter.string_to_136_array(man='444'))]
result = calculator.estimate_hand_value(tiles, win_tile, melds=melds, config=HandConfig(options=OptionalRules(has_open_tanyao=True)))
print_hand_result(result)
####################################################################
# Shanten calculation #
####################################################################
shanten = Shanten()
tiles = TilesConverter.string_to_34_array(man='13569', pin='123459', sou='443')
result = shanten.calculate_shanten(tiles)
print(result)
####################################################################
# Kazoe as a sanbaiman #
####################################################################
tiles = TilesConverter.string_to_136_array(man='22244466677788')
win_tile = TilesConverter.string_to_136_array(man='7')[0]
melds = [
Meld(Meld.KAN, TilesConverter.string_to_136_array(man='2222'), False)
]
dora_indicators = [
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
TilesConverter.string_to_136_array(man='1')[0],
]
config = HandConfig(is_riichi=True, options=OptionalRules(kazoe=HandConfig.KAZOE_SANBAIMAN))
result = calculator.estimate_hand_value(tiles, win_tile, melds, dora_indicators, config)
print_hand_result(result)
####################################################################
# Change the cost of yaku #
####################################################################
config = HandConfig(is_renhou=True)
# renhou as an yakuman - old style
config.yaku.renhou.han_closed = 13
tiles = TilesConverter.string_to_136_array(man='22444', pin='333567', sou='444')
win_tile = TilesConverter.string_to_136_array(sou='4')[0]
result = calculator.estimate_hand_value(tiles, win_tile, config=config)
print_hand_result(result)
|
tcapygen/layoutgen.py | Ahrvo-Trading-Systems/tcapy | 189 | 2820 | from __future__ import division, print_function
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
## Web server components
import dash_core_components as dcc
import dash_html_components as html
import base64
import os
## Date/time components
import pandas as pd
import datetime
from datetime import timedelta
from collections import OrderedDict
from pandas.tseries.offsets import *
from tcapy.vis.layoutdash import LayoutDash
########################################################################################################################
class LayoutDashImplGen(LayoutDash):
"""This implements the LayoutDash abstract class, to create the web based GUI for the tcapy application. It creates two
web pages
- detailed_page - for doing detailed tcapy analysis for a specific currency pair
- aggregated_page - for more aggregated style analysis across multiple currency pairs and over multiple time periods
"""
def __init__(self, app=None, constants=None, url_prefix=''):
super(LayoutDashImplGen, self).__init__(app=app, constants=constants, url_prefix=url_prefix)
available_dates = pd.date_range(
datetime.datetime.today().date() - timedelta(days=self._constants.gui_lookback_window),
datetime.datetime.today().date(), freq=BDay())
times = pd.date_range("0:00", "23:59", freq="15min")
### create the possible values for drop down boxes on both pages
# Reverse date list (for both detailed and aggregated pages)
self.available_dates = [x.date() for x in available_dates[::-1]]
# For detailed page only
self.available_times = [t.strftime("%H:%M") for t in times]
self.available_tickers = self._constants.available_tickers_dictionary['All']
self.available_venues = self._constants.available_venues_dictionary['All']
self.available_brokers = self._constants.available_brokers_dictionary['All']
self.available_algos = self._constants.available_algos_dictionary['All']
self.available_market_data = self._constants.available_market_data
self.available_order_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'arrival', 'twap', 'vwap',
'buy trade', 'sell trade']
self.available_execution_plot_lines = ['candlestick', 'mid', 'bid', 'ask', 'buy trade', 'sell trade']
self.available_slippage_bounds = ['0.25', '0.5', '1.0', '1.25', '1.5', '2.0', 'bid/ask']
# For aggregated page only
self.available_grouped_tickers = self._flatten_dictionary(self._constants.available_tickers_dictionary)
self.available_grouped_venues = self._flatten_dictionary(self._constants.available_venues_dictionary)
self.available_grouped_brokers = self._flatten_dictionary(self._constants.available_brokers_dictionary)
self.available_grouped_algos = self._flatten_dictionary(self._constants.available_algos_dictionary)
self.available_event_types = self._constants.available_event_types
self.available_metrics = self._constants.available_metrics
self.available_reload = ['no', 'yes']
self.available_visualization = ['yes', 'no']
self.construct_layout()
def _flatten_dictionary(self, dictionary):
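        # Flattens eg. {'All': ['EURUSD', ...], 'G10': [...]} into the group names
        # followed by every individual entry listed under 'All'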
available = dictionary['All']
available_groups = self._util_func.dict_key_list(dictionary.keys())
return self.flatten_list_of_strings([available_groups, available])
def construct_layout(self):
self.page_content = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
link_bar_dict = {'Detailed' : 'detailed',
'Aggregated' : 'aggregated',
'Compliance' : 'compliance'}
trade_outliers_cols = ['Date', 'ticker', 'side', 'notional cur', 'benchmark', 'exec not',
'exec not in rep cur', 'slippage']
broker_cols = ['Date', 'by broker notional (rep cur)']
        # Main page for detailed analysis (eg. over the course of a few days)
self.pages['detailed'] = html.Div([
self._sc.header_bar('FX: Detailed - Trader Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='detailed-status'), margin_left=5),
self._sc.horizontal_bar(),
# Dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id={'start-date-val' : self.available_dates,
'start-time-val' : self.available_times},
prefix_id='detailed'),
self._sc.drop_down(caption='Finish Date', id=OrderedDict([('finish-date-val', self.available_dates),
('finish-time-val', self.available_times)]),
prefix_id='detailed'),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='detailed',
drop_down_values=self.available_tickers),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='detailed',
drop_down_values=self.available_grouped_brokers),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='detailed',
drop_down_values=self.available_grouped_algos),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='detailed',
drop_down_values=self.available_grouped_venues),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='detailed',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='detailed',
drop_down_values=self.available_metrics)
]),
self._sc.horizontal_bar(),
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='detailed'),
# self.button(caption = 'Print PDF', id = 'detailed-print-pdf-button', className = 'no-print'),
# Orders
self._sc.horizontal_bar(),
self._sc.plot(caption='Orders: Timeline', id='order-candle-timeline-plot', prefix_id='detailed',
element_add=self._sc.timeline_dropdown('detailed-order-candle-timeline-plot',
self.available_order_plot_lines),
downloadplot_caption='Download CSV',
downloadplot_tag='order-candle-timeline-download-link',
download_file='download_order_candle_timeline', height=500),
self._sc.plot(caption='Orders: Markout', id='order-markout-plot', prefix_id='detailed', height=500),
self._sc.plot(caption='Orders: Histogram vs PDF fit', id='order-dist-plot', prefix_id='detailed', height=500),
# Execution trades
self._sc.horizontal_bar(),
self._sc.plot(caption='Executions: Timeline', id='execution-candle-timeline-plot', prefix_id='detailed',
element_add=self._sc.timeline_dropdown('detailed-execution-candle-timeline-plot',
self.available_execution_plot_lines),
downloadplot_caption='Download CSV',
downloadplot_tag='execution-candle-timeline-download-link',
download_file='download_execution_candle_timeline.csv', height=500),
self._sc.plot(caption='Executions: Markout', id='execution-markout-plot', prefix_id='detailed', height=500),
self._sc.plot(caption='Executions: Histogram vs PDF fit', id='execution-dist-plot', prefix_id='detailed', height=500),
# Detailed tcapy markout table for executions
html.Div([
html.H3('Executions: Markout Table'),
html.Div(id='detailed-execution-table')
],
style={'width': '1000px', 'display': 'inline-block', 'marginBottom': 5, 'marginTop': 5, 'marginLeft': 5,
'marginRight': 5}),
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
################################################################################################################
# Secondary page for analysing aggregated statistics over long periods of time, eg. who is the best broker?
self.pages['aggregated'] = html.Div([
self._sc.header_bar('FX: Aggregated - Trader Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='aggregated-status'), margin_left=5),
self._sc.horizontal_bar(),
# dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='aggregated',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='aggregated',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_tickers, multiselect=True),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_brokers, multiselect=True),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_algos, multiselect=True),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='aggregated',
drop_down_values=self.available_grouped_venues, multiselect=True),
self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='aggregated',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='aggregated',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Event Type', id='event-type-val', prefix_id='aggregated',
drop_down_values=self.available_event_types),
self._sc.drop_down(caption='Metric', id='metric-val', prefix_id='aggregated',
drop_down_values=self.available_metrics),
]),
self._sc.horizontal_bar(),
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='aggregated'),
# , msg_id='aggregated-status'),
self._sc.horizontal_bar(),
# self.date_picker_range(caption='Start/Finish Dates', id='aggregated-date-val', offset=[-7,-1]),
self._sc.plot(caption='Aggregated Trader: Summary',
id=['execution-by-ticker-bar-plot', 'execution-by-venue-bar-plot'], prefix_id='aggregated', height=500),
self._sc.horizontal_bar(),
self._sc.plot(caption='Aggregated Trader: Timeline', id='execution-by-ticker-timeline-plot',
prefix_id='aggregated', height=500),
self._sc.horizontal_bar(),
self._sc.plot(caption='Aggregated Trader: PDF fit (' + self._constants.reporting_currency + ' notional)', id=['execution-by-ticker-dist-plot',
'execution-by-venue-dist-plot'],
prefix_id='aggregated', height=500),
self._sc.horizontal_bar()
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
################################################################################################################
self.pages['compliance'] = html.Div([
self._sc.header_bar('FX: Compliance Analysis', img='logo.png'),
self._sc.link_bar(link_bar_dict),
self._sc.width_row_cell(html.B("Status: ok", id='compliance-status'), margin_left=5),
self._sc.horizontal_bar(),
# Dropdown selection boxes
html.Div([
self._sc.drop_down(caption='Start Date', id='start-date-val', prefix_id='compliance',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Finish Date', id='finish-date-val', prefix_id='compliance',
drop_down_values=self.available_dates),
self._sc.drop_down(caption='Ticker', id='ticker-val', prefix_id='compliance',
drop_down_values=self.available_grouped_tickers, multiselect=True),
self._sc.drop_down(caption='Broker', id='broker-val', prefix_id='compliance',
drop_down_values=self.available_grouped_brokers, multiselect=True),
self._sc.drop_down(caption='Algo', id='algo-val', prefix_id='compliance',
drop_down_values=self.available_grouped_algos, multiselect=True),
self._sc.drop_down(caption='Venue', id='venue-val', prefix_id='compliance',
drop_down_values=self.available_grouped_venues, multiselect=True),
self._sc.drop_down(caption='Reload', id='reload-val', prefix_id='compliance',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Market Data', id='market-data-val', prefix_id='compliance',
drop_down_values=self.available_market_data),
self._sc.drop_down(caption='Filter by Time', id='filter-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_reload),
self._sc.drop_down(caption='Start Time of Day', id='start-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_times),
self._sc.drop_down(caption='Finish Time of Day', id='finish-time-of-day-val', prefix_id='compliance',
drop_down_values=self.available_times),
self._sc.drop_down(caption='Slippage to Mid (bp)', id='slippage-bounds-val', prefix_id='compliance',
drop_down_values=self.available_slippage_bounds),
self._sc.drop_down(caption='Visualization', id='visualization-val', prefix_id='compliance',
drop_down_values=self.available_visualization)
]),
self._sc.horizontal_bar(),
html.Div([
self._sc.button(caption='Calculate', id='calculation-button', prefix_id='compliance'),
# self.date_picker(caption='Start Date', id='start-date-dtpicker', prefix_id='compliance'),
# self.date_picker(caption='Finish Date', id='finish-date-dtpicker', prefix_id='compliance'),
]),
self._sc.horizontal_bar(),
self._sc.table(caption='Compliance: Trade Outliers', id='execution-by-anomalous-table', prefix_id='compliance',
columns=trade_outliers_cols,
downloadplot_caption='Trade outliers CSV',
downloadplot_tag='execution-by-anomalous-download-link',
download_file='download_execution_by_anomalous.csv'),
self._sc.table(caption='Compliance: Totals by Broker', id='summary-by-broker-table', prefix_id='compliance',
columns=broker_cols,
downloadplot_caption='Download broker CSV',
downloadplot_tag='summary-by-broker-download-link',
download_file='download_broker.csv'
),
self._sc.horizontal_bar()
],
style={'width': '1000px', 'marginRight': 'auto', 'marginLeft': 'auto'})
# ID flags
self.id_flags = {
# Detailed trader page
# 'timeline_trade_orders' : {'client-orders': 'order', 'executions': 'trade'},
# 'markout_trade_orders' : {'client-orders': 'order_df', 'executions': 'trade_df'},
'detailed_candle_timeline_trade_order': {'execution': 'sparse_market_trade_df',
'order': 'sparse_market_order_df'},
'detailed_markout_trade_order': {'execution': 'trade_df', 'order': 'order_df'},
'detailed_table_trade_order': {'execution': 'table_trade_df_markout_by_all'},
'detailed_dist_trade_order': {'execution': 'dist_trade_df_by/pdf/side', 'order': 'dist_order_df_by/pdf/side'},
'detailed_download_link_trade_order': {'execution-candle-timeline': 'sparse_market_trade_df',
'order-candle-timeline': 'sparse_market_order_df'},
# Aggregated trader page
'aggregated_bar_trade_order': {'execution-by-ticker': 'bar_trade_df_by/mean/ticker',
'execution-by-venue': 'bar_trade_df_by/mean/venue'},
'aggregated_timeline_trade_order': {'execution-by-ticker': 'timeline_trade_df_by/mean_date/ticker',
'execution-by-venue': 'timeline_trade_df_by/mean_date/venue'},
'aggregated_dist_trade_order': {'execution-by-ticker': 'dist_trade_df_by/pdf/ticker',
'execution-by-venue': 'dist_trade_df_by/pdf/venue'},
# Compliance page
'compliance_metric_table_trade_order':
{'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all',
'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'},
'compliance_download_link_trade_order':
{'execution-by-anomalous': 'table_trade_df_slippage_by_worst_all',
'summary-by-broker': 'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'},
}
|
devil/devil/utils/cmd_helper.py | Martijnve23/catapult | 1,894 | 2826 | <gh_stars>1000+
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper for subprocess to make calling shell commands easier."""
import codecs
import logging
import os
import pipes
import select
import signal
import string
import subprocess
import sys
import time
CATAPULT_ROOT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
SIX_PATH = os.path.join(CATAPULT_ROOT_PATH, 'third_party', 'six')
if SIX_PATH not in sys.path:
sys.path.append(SIX_PATH)
import six
from devil import base_error
logger = logging.getLogger(__name__)
_SafeShellChars = frozenset(string.ascii_letters + string.digits + '@%_-+=:,./')
# Cache the string-escape codec to ensure subprocess can find it
# later. Return value doesn't matter.
if six.PY2:
codecs.lookup('string-escape')
def SingleQuote(s):
"""Return an shell-escaped version of the string using single quotes.
Reliably quote a string which may contain unsafe characters (e.g. space,
quote, or other special characters such as '$').
The returned value can be used in a shell command line as one token that gets
to be interpreted literally.
Args:
s: The string to quote.
Return:
The string quoted using single quotes.
"""
return pipes.quote(s)
def DoubleQuote(s):
"""Return an shell-escaped version of the string using double quotes.
Reliably quote a string which may contain unsafe characters (e.g. space
or quote characters), while retaining some shell features such as variable
interpolation.
The returned value can be used in a shell command line as one token that gets
to be further interpreted by the shell.
The set of characters that retain their special meaning may depend on the
shell implementation. This set usually includes: '$', '`', '\', '!', '*',
and '@'.
Args:
s: The string to quote.
Return:
The string quoted using double quotes.
"""
if not s:
return '""'
elif all(c in _SafeShellChars for c in s):
return s
else:
return '"' + s.replace('"', '\\"') + '"'
def ShrinkToSnippet(cmd_parts, var_name, var_value):
"""Constructs a shell snippet for a command using a variable to shrink it.
Takes into account all quoting that needs to happen.
Args:
cmd_parts: A list of command arguments.
var_name: The variable that holds var_value.
var_value: The string to replace in cmd_parts with $var_name
Returns:
A shell snippet that does not include setting the variable.
"""
def shrink(value):
parts = (x and SingleQuote(x) for x in value.split(var_value))
with_substitutions = ('"$%s"' % var_name).join(parts)
return with_substitutions or "''"
return ' '.join(shrink(part) for part in cmd_parts)
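# Illustrative example (hypothetical command and variable name):
#
#   >>> ShrinkToSnippet(['adb', 'push', '/tmp/foo', '/tmp/foo.bak'], 'p', '/tmp/foo')
#   'adb push "$p" "$p".bak'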
def Popen(args,
stdin=None,
stdout=None,
stderr=None,
shell=None,
cwd=None,
env=None):
# preexec_fn isn't supported on windows.
# pylint: disable=unexpected-keyword-arg
if sys.platform == 'win32':
close_fds = (stdin is None and stdout is None and stderr is None)
preexec_fn = None
else:
close_fds = True
preexec_fn = lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if six.PY2:
return subprocess.Popen(
args=args,
cwd=cwd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell,
close_fds=close_fds,
env=env,
preexec_fn=preexec_fn
)
else:
# opens stdout in text mode, so that caller side always get 'str',
# and there will be no type mismatch error.
# Ignore any decoding error, so that caller will not crash due to
# uncaught exception. Decoding errors are unavoidable, as we
# do not know the encoding of the output, and in some output there
# will be multiple encodings (e.g. adb logcat)
return subprocess.Popen(
args=args,
cwd=cwd,
stdin=stdin,
stdout=stdout,
stderr=stderr,
shell=shell,
close_fds=close_fds,
env=env,
preexec_fn=preexec_fn,
universal_newlines=True,
encoding='utf-8',
errors='ignore'
)
def Call(args, stdout=None, stderr=None, shell=None, cwd=None, env=None):
pipe = Popen(
args, stdout=stdout, stderr=stderr, shell=shell, cwd=cwd, env=env)
pipe.communicate()
return pipe.wait()
def RunCmd(args, cwd=None):
"""Opens a subprocess to execute a program and returns its return value.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
Returns:
Return code from the command execution.
"""
logger.debug(str(args) + ' ' + (cwd or ''))
return Call(args, cwd=cwd)
def GetCmdOutput(args, cwd=None, shell=False, env=None):
"""Open a subprocess to execute a program and returns its output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
Captures and returns the command's stdout.
Prints the command's stderr to logger (which defaults to stdout).
"""
(_, output) = GetCmdStatusAndOutput(args, cwd, shell, env)
return output
def _ValidateAndLogCommand(args, cwd, shell):
if isinstance(args, six.string_types):
if not shell:
raise Exception('string args must be run with shell=True')
else:
if shell:
raise Exception('array args must be run with shell=False')
args = ' '.join(SingleQuote(str(c)) for c in args)
if cwd is None:
cwd = ''
else:
cwd = ':' + cwd
logger.debug('[host]%s> %s', cwd, args)
return args
def GetCmdStatusAndOutput(args,
cwd=None,
shell=False,
env=None,
merge_stderr=False):
"""Executes a subprocess and returns its exit code and output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
merge_stderr: If True, captures stderr as part of stdout.
Returns:
The 2-tuple (exit code, stdout).
"""
status, stdout, stderr = GetCmdStatusOutputAndError(
args, cwd=cwd, shell=shell, env=env, merge_stderr=merge_stderr)
if stderr:
logger.critical('STDERR: %s', stderr)
logger.debug('STDOUT: %s%s', stdout[:4096].rstrip(),
'<truncated>' if len(stdout) > 4096 else '')
return (status, stdout)
def StartCmd(args, cwd=None, shell=False, env=None):
"""Starts a subprocess and returns a handle to the process.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
A process handle from subprocess.Popen.
"""
_ValidateAndLogCommand(args, cwd, shell)
return Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=shell,
cwd=cwd,
env=env)
def GetCmdStatusOutputAndError(args,
cwd=None,
shell=False,
env=None,
merge_stderr=False):
"""Executes a subprocess and returns its exit code, output, and errors.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
merge_stderr: If True, captures stderr as part of stdout.
Returns:
The 3-tuple (exit code, stdout, stderr).
"""
_ValidateAndLogCommand(args, cwd, shell)
stderr = subprocess.STDOUT if merge_stderr else subprocess.PIPE
pipe = Popen(
args,
stdout=subprocess.PIPE,
stderr=stderr,
shell=shell,
cwd=cwd,
env=env)
stdout, stderr = pipe.communicate()
return (pipe.returncode, stdout, stderr)
class TimeoutError(base_error.BaseError):
"""Module-specific timeout exception."""
def __init__(self, output=None):
super(TimeoutError, self).__init__('Timeout')
self._output = output
@property
def output(self):
return self._output
def _read_and_decode(fd, buffer_size):
data = os.read(fd, buffer_size)
if data and six.PY3:
data = data.decode('utf-8', errors='ignore')
return data
def _IterProcessStdoutFcntl(process,
iter_timeout=None,
timeout=None,
buffer_size=4096,
poll_interval=1):
"""An fcntl-based implementation of _IterProcessStdout."""
# pylint: disable=too-many-nested-blocks
import fcntl
try:
# Enable non-blocking reads from the child's stdout.
child_fd = process.stdout.fileno()
fl = fcntl.fcntl(child_fd, fcntl.F_GETFL)
fcntl.fcntl(child_fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
end_time = (time.time() + timeout) if timeout else None
iter_end_time = (time.time() + iter_timeout) if iter_timeout else None
while True:
if end_time and time.time() > end_time:
raise TimeoutError()
if iter_end_time and time.time() > iter_end_time:
yield None
iter_end_time = time.time() + iter_timeout
if iter_end_time:
iter_aware_poll_interval = min(poll_interval,
max(0, iter_end_time - time.time()))
else:
iter_aware_poll_interval = poll_interval
read_fds, _, _ = select.select([child_fd], [], [],
iter_aware_poll_interval)
if child_fd in read_fds:
data = _read_and_decode(child_fd, buffer_size)
if not data:
break
yield data
if process.poll() is not None:
# If process is closed, keep checking for output data (because of timing
# issues).
while True:
read_fds, _, _ = select.select([child_fd], [], [],
iter_aware_poll_interval)
if child_fd in read_fds:
data = _read_and_decode(child_fd, buffer_size)
if data:
yield data
continue
break
break
finally:
try:
if process.returncode is None:
# Make sure the process doesn't stick around if we fail with an
# exception.
process.kill()
except OSError:
pass
process.wait()
def _IterProcessStdoutQueue(process,
iter_timeout=None,
timeout=None,
buffer_size=4096,
poll_interval=1):
"""A Queue.Queue-based implementation of _IterProcessStdout.
TODO(jbudorick): Evaluate whether this is a suitable replacement for
_IterProcessStdoutFcntl on all platforms.
"""
# pylint: disable=unused-argument
if six.PY3:
import queue
else:
import Queue as queue
import threading
stdout_queue = queue.Queue()
def read_process_stdout():
# TODO(jbudorick): Pick an appropriate read size here.
while True:
try:
output_chunk = _read_and_decode(process.stdout.fileno(), buffer_size)
except IOError:
break
stdout_queue.put(output_chunk, True)
if not output_chunk and process.poll() is not None:
break
reader_thread = threading.Thread(target=read_process_stdout)
reader_thread.start()
end_time = (time.time() + timeout) if timeout else None
try:
while True:
if end_time and time.time() > end_time:
raise TimeoutError()
try:
s = stdout_queue.get(True, iter_timeout)
if not s:
break
yield s
except queue.Empty:
yield None
finally:
try:
if process.returncode is None:
# Make sure the process doesn't stick around if we fail with an
# exception.
process.kill()
except OSError:
pass
process.wait()
reader_thread.join()
_IterProcessStdout = (_IterProcessStdoutQueue
if sys.platform == 'win32' else _IterProcessStdoutFcntl)
"""Iterate over a process's stdout.
This is intentionally not public.
Args:
process: The process in question.
iter_timeout: An optional length of time, in seconds, to wait in
between each iteration. If no output is received in the given
time, this generator will yield None.
timeout: An optional length of time, in seconds, during which
the process must finish. If it fails to do so, a TimeoutError
will be raised.
buffer_size: The maximum number of bytes to read (and thus yield) at once.
poll_interval: The length of time to wait in calls to `select.select`.
If iter_timeout is set, the remaining length of time in the iteration
may take precedence.
Raises:
TimeoutError: if timeout is set and the process does not complete.
Yields:
basestrings of data or None.
"""
def GetCmdStatusAndOutputWithTimeout(args,
timeout,
cwd=None,
shell=False,
logfile=None,
env=None):
"""Executes a subprocess with a timeout.
Args:
args: List of arguments to the program, the program to execute is the first
element.
timeout: the timeout in seconds or None to wait forever.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
logfile: Optional file-like object that will receive output from the
command as it is running.
env: If not None, a mapping that defines environment variables for the
subprocess.
Returns:
The 2-tuple (exit code, output).
Raises:
TimeoutError on timeout.
"""
_ValidateAndLogCommand(args, cwd, shell)
output = six.StringIO()
process = Popen(
args,
cwd=cwd,
shell=shell,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
try:
for data in _IterProcessStdout(process, timeout=timeout):
if logfile:
logfile.write(data)
output.write(data)
except TimeoutError:
raise TimeoutError(output.getvalue())
str_output = output.getvalue()
logger.debug('STDOUT+STDERR: %s%s', str_output[:4096].rstrip(),
'<truncated>' if len(str_output) > 4096 else '')
return process.returncode, str_output
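# Illustrative usage (hypothetical command): partial output remains available
# on the raised TimeoutError via its `output` property.
#
#   try:
#     status, output = GetCmdStatusAndOutputWithTimeout(['sleep', '10'], timeout=2)
#   except TimeoutError as e:
#     partial = e.output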
def IterCmdOutputLines(args,
iter_timeout=None,
timeout=None,
cwd=None,
shell=False,
env=None,
check_status=True):
"""Executes a subprocess and continuously yields lines from its output.
Args:
args: List of arguments to the program, the program to execute is the first
element.
iter_timeout: Timeout for each iteration, in seconds.
timeout: Timeout for the entire command, in seconds.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
shell: Whether to execute args as a shell command. Must be True if args
is a string and False if args is a sequence.
env: If not None, a mapping that defines environment variables for the
subprocess.
check_status: A boolean indicating whether to check the exit status of the
process after all output has been read.
Yields:
The output of the subprocess, line by line.
Raises:
CalledProcessError if check_status is True and the process exited with a
non-zero exit status.
"""
cmd = _ValidateAndLogCommand(args, cwd, shell)
process = Popen(
args,
cwd=cwd,
shell=shell,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return _IterCmdOutputLines(
process,
cmd,
iter_timeout=iter_timeout,
timeout=timeout,
check_status=check_status)
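# Illustrative usage (hypothetical command): with iter_timeout set, None is
# yielded whenever an iteration elapses without any new output.
#
#   for line in IterCmdOutputLines(['adb', 'logcat'], iter_timeout=5):
#     if line is None:
#       continue  # idle iteration, keep waiting
#     print(line)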
def _IterCmdOutputLines(process,
cmd,
iter_timeout=None,
timeout=None,
check_status=True):
buffer_output = ''
iter_end = None
cur_iter_timeout = None
if iter_timeout:
iter_end = time.time() + iter_timeout
cur_iter_timeout = iter_timeout
for data in _IterProcessStdout(
process, iter_timeout=cur_iter_timeout, timeout=timeout):
if iter_timeout:
# Check whether the current iteration has timed out.
cur_iter_timeout = iter_end - time.time()
if data is None or cur_iter_timeout < 0:
yield None
iter_end = time.time() + iter_timeout
continue
else:
assert data is not None, (
'Iteration received no data despite no iter_timeout being set. '
'cmd: %s' % cmd)
# Construct lines to yield from raw data.
buffer_output += data
has_incomplete_line = buffer_output[-1] not in '\r\n'
lines = buffer_output.splitlines()
buffer_output = lines.pop() if has_incomplete_line else ''
for line in lines:
yield line
if iter_timeout:
iter_end = time.time() + iter_timeout
if buffer_output:
yield buffer_output
if check_status and process.returncode:
raise subprocess.CalledProcessError(process.returncode, cmd)
|
code/tools/run_viz_single_task.py | santomon/taskonomy | 789 | 2857 | <gh_stars>100-1000
from __future__ import absolute_import, division, print_function
import argparse
import importlib
import itertools
import time
from multiprocessing import Pool
import numpy as np
import os
import pdb
import pickle
import subprocess
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import threading
import init_paths
from models.sample_models import *
target_tasks = "autoencoder colorization curvature denoise edge2d edge3d ego_motion fix_pose impainting_whole jigsaw keypoint2d keypoint3d non_fixated_pose point_match reshade rgb2depth rgb2mist rgb2sfnorm room_layout segment25d segment2d vanishing_point_well_defined segmentsemantic_rb class_selected class_1000"
list_of_tasks = target_tasks.split(" ")
ON_TEST_SET = True
IN_TRAIN_MODE = False
parser = argparse.ArgumentParser(description='Viz Single Task')
parser.add_argument('--idx', dest='idx',
help='Task to run', type=int)
parser.add_argument('--hs', dest='hs',
help='Hidden size to use', type=int)
parser.add_argument('--n-parallel', dest='n_parallel',
help='Number of models to run in parallel', type=int)
parser.set_defaults(n_parallel=1)
tf.logging.set_verbosity(tf.logging.ERROR)
ipython_std_out = sys.stdout
# Disable
def blockPrint():
sys.stdout = open(os.devnull, 'w')
# Restore
def enablePrint():
sys.stdout = ipython_std_out
# Force Print
def forcePrint(str):
enablePrint()
print(str)
sys.stdout.flush()
blockPrint()
def remove_dups(seq):
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
pairs = list(itertools.product(list_of_tasks, list_of_tasks))
args = parser.parse_args()
idx_to_run = args.idx
if idx_to_run == -1:
pairs_to_run = pairs
else:
pairs_to_run = pairs[idx_to_run:idx_to_run+1]
def run_to_task(task_to):
import general_utils
from general_utils import RuntimeDeterminedEnviromentVars
import models.architectures as architectures
from data.load_ops import resize_rescale_image
import utils
from data.task_data_loading import load_and_specify_preprocessors_for_representation_extraction
import lib.data.load_ops as load_ops
tf.logging.set_verbosity(tf.logging.ERROR)
all_outputs = {}
pickle_dir = 'viz_output_single_task.pkl'
import os
if os.path.isfile(pickle_dir):
with open( pickle_dir, 'rb') as fp:
all_outputs = pickle.load(fp)
for task in list_of_tasks:
if task in all_outputs:
print("{} already exists....\n\n\n".format(task))
continue
print("Doing {task}".format(task=task))
general_utils = importlib.reload(general_utils)
tf.reset_default_graph()
training_runners = { 'sess': tf.InteractiveSession(), 'coord': tf.train.Coordinator() }
# task = '{f}__{t}__{hs}'.format(f=task_from, t=task_to, hs=args.hs)
CONFIG_DIR = '/home/ubuntu/task-taxonomy-331b/experiments/final/{TASK}'.format(TASK=task)
############## Load Configs ##############
cfg = utils.load_config( CONFIG_DIR, nopause=True )
RuntimeDeterminedEnviromentVars.register_dict( cfg )
split_file = cfg['test_filenames'] if ON_TEST_SET else cfg['val_filenames']
cfg['train_filenames'] = split_file
cfg['val_filenames'] = split_file
cfg['test_filenames'] = split_file
cfg['num_epochs'] = 1
cfg['randomize'] = False
root_dir = cfg['root_dir']
cfg['num_read_threads'] = 1
print(cfg['log_root'])
if task == 'jigsaw':
continue
cfg['model_path'] = os.path.join(
cfg['log_root'],
task,
'model.permanent-ckpt'
)
print( cfg['model_path'])
if cfg['model_path'] is None:
continue
############## Set Up Inputs ##############
# tf.logging.set_verbosity( tf.logging.INFO )
        inputs = utils.setup_input( cfg, is_training=ON_TEST_SET, use_filename_queue=False ) # is_training determines whether to use train/validation
RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
RuntimeDeterminedEnviromentVars.populate_registered_variables()
start_time = time.time()
# utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )
############## Set Up Model ##############
model = utils.setup_model( inputs, cfg, is_training=IN_TRAIN_MODE )
m = model[ 'model' ]
model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )
############## Start dataloading workers ##############
data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn(
inputs, cfg, is_training=ON_TEST_SET, use_filename_queue=False )
prefetch_threads = threading.Thread(
target=data_prefetch_init_fn,
args=( training_runners[ 'sess' ], training_runners[ 'coord' ] ))
prefetch_threads.start()
############## Run First Batch ##############
if not hasattr(m, 'masks'):
(
input_batch, target_batch,
data_idx,
predicted, loss,
) = training_runners['sess'].run( [
m.input_images, m.targets,
model[ 'data_idxs' ],
m.decoder_output, m.total_loss] )
mask_batch = 1.
else:
(
input_batch, target_batch, mask_batch,
data_idx,
predicted, loss,
) = training_runners['sess'].run( [
m.input_images, m.targets, m.masks,
model[ 'data_idxs' ],
m.decoder_output, m.total_loss] )
if task == 'segment2d' or task == 'segment25d':
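            # Segmentation outputs are 64-d per-pixel embeddings; project each one
            # to 3 PCA components and min-max normalise so it can be shown as RGB.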
from sklearn.decomposition import PCA
x = np.zeros((32,256,256,3), dtype='float')
for i in range(predicted.shape[0]):
embedding_flattened = np.squeeze(predicted[i]).reshape((-1,64))
pca = PCA(n_components=3)
pca.fit(embedding_flattened)
lower_dim = pca.transform(embedding_flattened).reshape((256,256,-1))
lower_dim = (lower_dim - lower_dim.min()) / (lower_dim.max() - lower_dim.min())
x[i] = lower_dim
predicted = x
############## Clean Up ##############
training_runners[ 'coord' ].request_stop()
training_runners[ 'coord' ].join()
# if os.path.isfile(pickle_dir):
# with open(pickle_dir, 'rb') as fp:
# all_outputs = pickle.load(fp)
############## Store to dict ##############
to_store = {
'input': input_batch,
'target': target_batch,
'mask': mask_batch,
'data_idx':data_idx,
'output':predicted}
all_outputs[task] = to_store
print("Done: {}".format(task))
# os.system("sudo cp {d} /home/ubuntu/s3/model_log".format(d=pickle_dir))
############## Reset graph and paths ##############
tf.reset_default_graph()
training_runners['sess'].close()
try:
del sys.modules[ 'config' ]
except:
pass
sys.path = remove_dups(sys.path)
print("FINISHED: {}\n\n\n\n\n\n".format(task))
pickle_dir = 'viz_output_single_task.pkl'
with open( pickle_dir, 'wb') as fp:
pickle.dump(all_outputs, fp)
try:
subprocess.call("aws s3 cp {} s3://task-preprocessing-512-oregon/visualizations/".format(pickle_dir), shell=True)
except:
subprocess.call("sudo cp {} /home/ubuntu/s3/visualizations/".format(pickle_dir), shell=True)
return
if __name__ == '__main__':
run_to_task(None)
# with Pool(args.n_parallel) as p:
# p.map(run_to_task, list_of_tasks)
|
stratum/portage/build_defs.bzl | cholve/stratum | 267 | 2858 | <reponame>cholve/stratum<filename>stratum/portage/build_defs.bzl
# Copyright 2018 Google LLC
# Copyright 2018-present Open Networking Foundation
# SPDX-License-Identifier: Apache-2.0
"""A portable build system for Stratum P4 switch stack.
To use this, load() this file in a BUILD file, specifying the symbols needed.
The public symbols are the macros:
decorate(path)
sc_cc_lib Declare a portable Library.
sc_proto_lib Declare a portable .proto Library.
sc_cc_bin Declare a portable Binary.
sc_package Declare a portable tarball package.
and the variables/lists:
ALL_ARCHES All known arches.
EMBEDDED_ARCHES All embedded arches.
EMBEDDED_PPC Name of PowerPC arch - "ppc".
EMBEDDED_X86 Name of "x86" arch.
HOST_ARCH Name of default "host" arch.
HOST_ARCHES All host arches.
STRATUM_INTERNAL For declaring Stratum internal visibility.
The macros are like cc_library(), proto_library(), and cc_binary(), but with
different options and some restrictions. The key difference: you can
supply lists of architectures for which they should be compiled - defaults
to all if left unstated. Internally, libraries and binaries are generated
for every listed architecture. The names are decorated to keep them different
and allow all to be generated and addressed independently.
This aspect of the system is suboptimal - something along the lines of
augmenting context with a user defined configuration fragment would be a
much cleaner solution.
Currently supported architectures:
ppc
x86
"""
load("//tools/build_defs/label:def.bzl", "parse_label")
load(
"//devtools/build_cleaner/skylark:build_defs.bzl",
"register_extension_info",
)
load("@rules_proto//proto:defs.bzl", "proto_library")
load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test")
# Generic path & label helpers. ============================================
def _normpath(path):
"""Normalize a path.
Normalizes a path by removing unnecessary path-up segments and its
corresponding directories. Providing own implementation because import os
is not allowed in build defs.
For example
../../dir/to/deeply/nested/path/../../../other/path
will become
../../dir/to/other/path
Args:
path: A valid absolute or relative path to normalize.
Returns:
A path equivalent to the input path with minimal use of path-up segments.
Invalid input paths will stay invalid.
"""
sep = "/"
level = 0
result = []
for d in path.split(sep):
if d in ("", "."):
if result:
continue
elif d == "..":
if level > 0:
result.pop()
level += -1
continue
else:
level += 1
result.append(d)
return sep.join(result)
# Adds a suffix to a label, expanding implicit targets if needed.
def decorate(label, suffix):
if label.endswith(":"): # .../bar: -> .../bar
label = label[:-1]
if ":" in label: # .../bar:bat -> .../bar:bat_suffix
return "%s_%s" % (label, suffix)
elif label.startswith("//"): # //foo/bar -> //foo/bar:bar_suffix
return "%s:%s_%s" % (label, label.split("/")[-1], suffix)
else: # bar -> bar_suffix
return "%s_%s" % (label, suffix)
# Creates a relative filename from a label, replacing "//" and ":".
def _make_filename(label):
if label.startswith("//"): # //foo/bar:bat/baz -> google3_foo/bar/bat/baz
return label.replace("//", "google3/").replace(":", "/")
elif label.startswith(":"): # :bat/baz -> bat/baz
return label[1:]
else: # bat/baz -> bat/baz
return label
# Adds dquotes around a string.
def dquote(s):
return '"' + s + '"'
# Adds squotes around a string.
def squote(s):
return "'" + s + "'"
# Emulate Python 2.5+ str.startswith((prefix, ...)), returning the matching prefix or None.
def starts_with(s, prefix_list):
for prefix in prefix_list:
if s.startswith(prefix):
return prefix
return None
def sc_platform_select(host = None, ppc = None, x86 = None, default = None):
"""Public macro to alter blaze rules based on the platform architecture.
Generates a blaze select(...) statement that can be used in most contexts to
alter a blaze rule based on the target platform architecture. If no selection
is provided for a given platform, {default} is used instead. A specific value
or default must be provided for every target platform.
Args:
host: The value to use for host builds.
ppc: The value to use for ppc builds.
x86: The value to use for x86 builds.
default: The value to use for any of {host,ppc,x86} that isn't specified.
Returns:
The requested selector.
"""
if default == None and (host == None or ppc == None or x86 == None):
fail("Missing a select value for at least one platform in " +
"sc_platform_select. Please add.")
config_label_prefix = "//stratum:stratum_"
return select({
"//conditions:default": (host or default),
config_label_prefix + "ppc": (ppc or default),
config_label_prefix + "x86": (x86 or default),
})
# Generates an sc_platform_select based on a textual list of arches.
def sc_platform_filter(value, default, arches):
return sc_platform_select(
host = value if "host" in arches else default,
ppc = value if "ppc" in arches else default,
x86 = value if "x86" in arches else default,
)
def sc_platform_alias(
name,
host = None,
ppc = None,
x86 = None,
default = None,
visibility = None):
"""Public macro to create an alias that changes based on target arch.
Generates a blaze alias that will select the appropriate target. If no
selection is provided for a given platform and no default is set, a
dummy default target is used instead.
Args:
name: The name of the alias target.
host: The result of the alias for host builds.
ppc: The result of the alias for ppc builds.
x86: The result of the alias for x86 builds.
default: The result of the alias for any of {host,ppc,x86} that isn't
specified.
visibility: The visibility of the alias target.
"""
native.alias(
name = name,
actual = sc_platform_select(
default = default or "//stratum/portage:dummy",
host = host,
ppc = ppc,
x86 = x86,
),
visibility = visibility,
)
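# Usage sketch (hypothetical target names): alias a library to per-arch
# implementations, falling back to the dummy target elsewhere.
#
#     sc_platform_alias(
#         name = "switch_interface",
#         ppc = ":switch_interface_ppc",
#         x86 = ":switch_interface_x86",
#     )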
# Embedded build definitions. ==============================================
EMBEDDED_PPC = "ppc"
EMBEDDED_X86 = "x86"
EMBEDDED_ARCHES = [
EMBEDDED_PPC,
EMBEDDED_X86,
]
HOST_ARCH = "host"
HOST_ARCHES = [HOST_ARCH]
ALL_ARCHES = EMBEDDED_ARCHES + HOST_ARCHES
# Identify Stratum platform arch for .pb.h shims and other portability hacks.
_ARCH_DEFINES = sc_platform_select(
default = ["STRATUM_ARCH_HOST"],
ppc = ["STRATUM_ARCH_PPC"],
x86 = ["STRATUM_ARCH_X86"],
)
STRATUM_INTERNAL = [
"//stratum:__subpackages__",
]
#
# Build options for all embedded architectures
#
# Set _TRACE_SRCS to show sources in embedded sc_cc_lib compile steps.
# This is more general than it may seem: genrule doesn't have hdrs or deps
# attributes, so all embedded dependencies appear as a `src'.
# TODO(unknown): if useful again then inject from cmdline else kill feature.
_TRACE_SRCS = False
# Used for all gcc invocations.
_EMBEDDED_FLAGS = [
"-O0", # Don't use this for program-sizing build
#-- "-Os", # Use this for program-sizing build
"-g", # Don't use this for program-sizing build
"-Wall",
"-Werror", # Warn lots, and force fixing warnings.
"-no-canonical-prefixes", # Don't mangle paths and confuse blaze.
"-fno-builtin-malloc", # We'll use tcmalloc
"-fno-builtin-calloc",
"-fno-builtin-realloc",
"-fno-builtin-free",
"-D__STDC_FORMAT_MACROS=1",
# TODO(unknown): Figure out how we can use $(CC_FLAGS) instead of this.
"-D__GOOGLE_STL_LEGACY_COMPATIBILITY",
]
# Used for C and C++ compiler invocations.
_EMBEDDED_CFLAGS = [
"-I$(GENDIR)",
]
# Used for C++ compiler invocations.
_EMBEDDED_CXXFLAGS = [
"-std=gnu++11", # Allow C++11 features _and_ GNU extensions.
]
# Used for linking binaries.
_EMBEDDED_LDFLAGS = [
# "-static", # Use this for program-sizing build
# "-Wl,--gc-sections,--no-wchar-size-warning", # Use this for program-sizing build
]
# PPC ======================================================================
_PPC_GRTE = "//unsupported_toolchains/crosstoolng_powerpc32_8540/sysroot"
# X86 ======================================================================
_X86_GRTE = "//grte/v4_x86/release/usr/grte/v4"
# Portability definitions ===================================================
def sc_cc_test(
name,
size = None,
srcs = None,
deps = None,
data = None,
defines = None,
copts = None,
linkopts = None,
visibility = None):
"""Creates a cc_test rule that interacts safely with Stratum builds.
Generates a cc_test rule that doesn't break the build when an embedded arch
is selected. During embedded builds this target will generate a dummy binary
and will not attempt to build any dependencies.
Args:
name: Analogous to cc_test name argument.
size: Analogous to cc_test size argument.
srcs: Analogous to cc_test srcs argument.
deps: Analogous to cc_test deps argument.
data: Analogous to cc_test data argument.
defines: Analogous to cc_test defines argument.
copts: Analogous to cc_test copts argument.
linkopts: Analogous to cc_test linkopts argument.
visibility: Analogous to cc_test visibility argument.
"""
cc_test(
name = name,
size = size or "small",
srcs = sc_platform_select(host = srcs or [], default = []),
deps = sc_platform_select(
host = deps or [],
default = ["//stratum/portage:dummy_with_main"],
),
data = data or [],
defines = defines,
copts = copts,
linkopts = linkopts,
visibility = visibility,
)
register_extension_info(
extension_name = "sc_cc_test",
label_regex_for_dep = "{extension_name}",
)
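# Minimal usage sketch (target and dep names are assumed, not from this file):
#
#   sc_cc_test(
#       name = "table_manager_test",
#       srcs = ["table_manager_test.cc"],
#       deps = [
#           ":table_manager",
#           "@com_google_googletest//:gtest_main",
#       ],
#   )
#
# On ppc/x86 builds srcs collapse to [] and deps to
# //stratum/portage:dummy_with_main, so embedded builds never pull in
# host-only test dependencies.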
def sc_cc_lib(
name,
deps = None,
srcs = None,
hdrs = None,
arches = None,
copts = None,
defines = None,
includes = None,
include_prefix = None,
strip_include_prefix = None,
data = None,
testonly = None,
textual_hdrs = None,
visibility = None,
xdeps = None):
"""Creates rules for the given portable library and arches.
Args:
name: Analogous to cc_library name argument.
deps: Analogous to cc_library deps argument.
srcs: Analogous to cc_library srcs argument.
hdrs: Analogous to cc_library hdrs argument.
arches: List of architectures to generate this way.
copts: Analogous to cc_library copts argument.
defines: Symbols added as "-D" compilation options.
includes: Paths to add as "-I" compilation options.
include_prefix: Analogous to cc_library include_prefix argument.
strip_include_prefix: Analogous to cc_library strip_include_prefix argument.
data: Files to provide as data at runtime (host builds only).
testonly: Standard blaze testonly parameter.
textual_hdrs: Analogous to cc_library.
visibility: Standard blaze visibility parameter.
xdeps: External (file) dependencies of this library - no decorations
assumed, used and exported as header, not for flags, libs, etc.
"""
alwayslink = 0
deps = depset(deps or [])
srcs = depset(srcs or [])
hdrs = depset(hdrs or [])
xdeps = depset(xdeps or [])
copts = depset(copts or [])
includes = depset(includes or [])
data = depset(data or [])
textual_hdrs = depset(textual_hdrs or [])
if srcs:
if [s for s in srcs.to_list() if not s.endswith(".h")]:
alwayslink = 1
if not arches:
arches = ALL_ARCHES
defs_plus = (defines or []) + _ARCH_DEFINES
textual_plus = textual_hdrs | depset(deps.to_list())
cc_library(
name = name,
deps = sc_platform_filter(deps, [], arches),
srcs = sc_platform_filter(srcs, [], arches),
hdrs = sc_platform_filter(hdrs, [], arches),
alwayslink = alwayslink,
copts = sc_platform_filter(copts, [], arches),
defines = defs_plus,
includes = sc_platform_filter(includes, [], arches),
include_prefix = include_prefix,
strip_include_prefix = strip_include_prefix,
testonly = testonly,
textual_hdrs = sc_platform_filter(
textual_plus | xdeps,
[],
arches,
),
data = sc_platform_filter(data, [], arches),
visibility = visibility,
)
register_extension_info(
extension_name = "sc_cc_lib",
label_regex_for_dep = "{extension_name}",
)
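# Hypothetical portable library restricted to the embedded arches:
#
#   sc_cc_lib(
#       name = "bcm_sdk_wrapper",
#       srcs = ["bcm_sdk_wrapper.cc"],
#       hdrs = ["bcm_sdk_wrapper.h"],
#       arches = EMBEDDED_ARCHES,
#       deps = [":bcm_sdk_interface"],
#   )
#
# For arches outside the list, srcs/hdrs/deps all filter down to [], so the
# target still exists everywhere but only contributes code where intended.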
def sc_cc_bin(
name,
deps = None,
srcs = None,
arches = None,
copts = None,
defines = None,
includes = None,
testonly = None,
visibility = None):
"""Creates rules for the given portable binary and arches.
Args:
name: Analogous to cc_binary name argument.
deps: Analogous to cc_binary deps argument.
srcs: Analogous to cc_binary srcs argument.
arches: List of architectures to generate this way.
copts: Analogous to cc_binary copts argument.
defines: Symbols added as "-D" compilation options.
includes: Paths to add as "-I" compilation options.
testonly: Standard blaze testonly parameter.
visibility: Standard blaze visibility parameter.
"""
deps = depset(deps or [])
srcs = depset(srcs or [])
if not arches:
arches = ALL_ARCHES
defs_plus = (defines or []) + _ARCH_DEFINES
cc_binary(
name = name,
deps = sc_platform_filter(
deps,
["//stratum/portage:dummy_with_main"],
arches,
),
srcs = sc_platform_filter(srcs, [], arches),
copts = copts,
defines = defs_plus,
includes = includes,
linkopts = ["-ldl", "-lutil"],
testonly = testonly,
visibility = visibility,
)
register_extension_info(
extension_name = "sc_cc_bin",
label_regex_for_dep = "{extension_name}",
)
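# Hypothetical portable binary; outside the listed arches the rule links the
# //stratum/portage:dummy_with_main placeholder instead of the real deps.
#
#   sc_cc_bin(
#       name = "stratum_stub",
#       srcs = ["main.cc"],
#       deps = [":hal_lib"],
#       arches = EMBEDDED_ARCHES,
#   )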
# Protobuf =================================================================
_SC_GRPC_DEPS = [
"//sandblaze/prebuilt/grpc",
"//sandblaze/prebuilt/grpc:grpc++_codegen_base",
"//sandblaze/prebuilt/grpc:grpc++_codegen_proto_lib",
]
_PROTOC = "@com_google_protobuf//:protobuf:protoc"
_PROTOBUF = "@com_google_protobuf//:protobuf"
_SC_GRPC_PLUGIN = "//sandblaze/prebuilt/protobuf:grpc_cpp_plugin"
_GRPC_PLUGIN = "//grpc:grpc_cpp_plugin"
def _loc(target):
"""Return target location for constructing commands.
Args:
target: Blaze target name available to this build.
Returns:
$(location target)
"""
return "$(location %s)" % target
def _gen_proto_lib(
name,
srcs,
hdrs,
deps,
arch,
visibility,
testonly,
proto_include,
grpc_shim_rule):
"""Creates rules and filegroups for embedded protobuf library.
For every given ${src}.proto, generate:
:${src}_${arch}.pb rule to run protoc
${src}.proto => ${src}.${arch}.pb.{h,cc}
:${src}_${arch}.grpc.pb rule to run protoc w/ erpc plugin:
${src}.proto => ${src}.${arch}.grpc.pb.{h,cc}
:${src}_${arch}_proto_rollup collects include options for protoc:
${src}_${arch}_proto_rollup.flags
    Feed each set into sc_cc_lib to wrap them up into a usable library;
note that ${src}_${arch}_erpc_proto depends on ${src}_${arch}_proto.
Args:
name: Base name for this library.
srcs: List of proto files
hdrs: More files to build into this library, but also exported for
dependent rules to utilize.
deps: List of deps for this library
arch: Which architecture to build this library for.
visibility: Standard blaze visibility parameter, passed through to
subsequent rules.
testonly: Standard blaze testonly parameter.
proto_include: Include path for generated sc_cc_libs.
grpc_shim_rule: If needed, the name of the grpc shim for this proto lib.
"""
bash_vars = ["g3=$${PWD}"]
# TODO(unknown): Switch protobuf to using the proto_include mechanism
protoc_label = _PROTOC
protobuf_label = _PROTOBUF
protobuf_hdrs = "%s:well_known_types_srcs" % protobuf_label
protobuf_srcs = [protobuf_hdrs]
protobuf_include = "$${g3}/protobuf/src"
if arch in EMBEDDED_ARCHES:
grpc_plugin = _SC_GRPC_PLUGIN
else:
grpc_plugin = _GRPC_PLUGIN
protoc_deps = []
for dep in deps:
if dep.endswith("_proto"):
protoc_deps.append("%s_%s_headers" % (dep, arch))
name_arch = decorate(name, arch)
# We use this filegroup to accumulate the set of .proto files needed to
# compile this proto.
native.filegroup(
name = decorate(name_arch, "headers"),
srcs = hdrs + protoc_deps,
visibility = visibility,
)
my_proto_rollup = decorate(name_arch, "proto_rollup.flags")
protoc_srcs_set = (srcs + hdrs + protoc_deps +
protobuf_srcs + [my_proto_rollup])
gen_srcs = []
gen_hdrs = []
grpc_gen_hdrs = []
grpc_gen_srcs = []
tools = [protoc_label]
grpc_tools = [protoc_label, grpc_plugin]
protoc = "$${g3}/%s" % _loc(protoc_label)
grpc_plugin = "$${g3}/%s" % _loc(grpc_plugin)
cpp_out = "$${g3}/$(GENDIR)/%s/%s" % (native.package_name(), arch)
accum_flags = []
full_proto_include = None
if proto_include == ".":
full_proto_include = native.package_name()
elif proto_include:
full_proto_include = "%s/%s" % (native.package_name(), proto_include)
if full_proto_include:
temp_prefix = "%s/%s" % (cpp_out, native.package_name()[len(full_proto_include):])
# We do a bit of extra work with these include flags to avoid generating
# warnings.
accum_flags.append(
"$$(if [[ -e $(GENDIR)/%s ]]; then echo -IG3LOC/$(GENDIR)/%s; fi)" %
(full_proto_include, full_proto_include),
)
accum_flags.append(
"$$(if [[ -e %s ]]; then echo -IG3LOC/%s; fi)" %
(full_proto_include, full_proto_include),
)
else:
temp_prefix = "%s/%s" % (cpp_out, native.package_name())
proto_rollups = [
decorate(decorate(dep, arch), "proto_rollup.flags")
for dep in deps
if dep.endswith("_proto")
]
proto_rollup_cmds = ["printf '%%s\n' %s" % flag for flag in accum_flags]
proto_rollup_cmds.append("cat $(SRCS)")
proto_rollup_cmd = "{ %s; } | sort -u -o $(@)" % "; ".join(proto_rollup_cmds)
native.genrule(
name = decorate(name_arch, "proto_rollup"),
srcs = proto_rollups,
outs = [my_proto_rollup],
cmd = proto_rollup_cmd,
visibility = visibility,
testonly = testonly,
)
for src in srcs + hdrs:
if src.endswith(".proto"):
src_stem = src[0:-6]
src_arch = "%s_%s" % (src_stem, arch)
temp_stem = "%s/%s" % (temp_prefix, src_stem)
gen_stem = "%s.%s" % (src_stem, arch)
# We can't use $${PWD} until this step, because our rollup command
# might be generated on another forge server.
proto_path_cmds = ["rollup=$$(sed \"s,G3LOC,$${PWD},g\" %s)" %
_loc(my_proto_rollup)]
proto_rollup_flags = ["$${rollup}"]
if proto_include:
# We'll be cd-ing to another directory before protoc, so
# adjust our .proto path accordingly.
proto_src_loc = "%s/%s" % (native.package_name(), src)
if proto_src_loc.startswith(full_proto_include + "/"):
proto_src_loc = proto_src_loc[len(full_proto_include) + 1:]
else:
print("Invalid proto include '%s' doesn't match src %s" %
(full_proto_include, proto_src_loc))
# By cd-ing to another directory, we force protoc to produce
# different symbols. Careful, our proto might be in GENDIR!
proto_path_cmds.append("; ".join([
"if [[ -e %s ]]" % ("%s/%s" % (full_proto_include, proto_src_loc)),
"then cd %s" % full_proto_include,
"else cd $(GENDIR)/%s" % full_proto_include,
"fi",
]))
gendir_include = ["-I$${g3}/$(GENDIR)", "-I$${g3}", "-I."]
else:
proto_src_loc = "%s/%s" % (native.package_name(), src)
proto_path_cmds.append("[[ -e %s ]] || cd $(GENDIR)" % proto_src_loc)
gendir_include = ["-I$(GENDIR)", "-I."]
# Generate messages
gen_pb_h = gen_stem + ".pb.h"
gen_pb_cc = gen_stem + ".pb.cc"
gen_hdrs.append(gen_pb_h)
gen_srcs.append(gen_pb_cc)
cmds = bash_vars + [
"mkdir -p %s" % temp_prefix,
] + proto_path_cmds + [
" ".join([protoc] +
gendir_include +
proto_rollup_flags +
[
"-I%s" % protobuf_include,
"--cpp_out=%s" % cpp_out,
proto_src_loc,
]),
"cd $${g3}",
"cp %s.pb.h %s" % (temp_stem, _loc(gen_pb_h)),
"cp %s.pb.cc %s" % (temp_stem, _loc(gen_pb_cc)),
]
pb_outs = [gen_pb_h, gen_pb_cc]
native.genrule(
name = src_arch + ".pb",
srcs = protoc_srcs_set,
outs = pb_outs,
tools = tools,
cmd = " && ".join(cmds),
heuristic_label_expansion = 0,
visibility = visibility,
)
# Generate GRPC
if grpc_shim_rule:
gen_grpc_pb_h = gen_stem + ".grpc.pb.h"
gen_grpc_pb_cc = gen_stem + ".grpc.pb.cc"
grpc_gen_hdrs.append(gen_grpc_pb_h)
grpc_gen_srcs.append(gen_grpc_pb_cc)
cmds = bash_vars + [
"mkdir -p %s" % temp_prefix,
] + proto_path_cmds + [
" ".join([
protoc,
"--plugin=protoc-gen-grpc-cpp=%s" % grpc_plugin,
] +
gendir_include +
proto_rollup_flags +
[
"-I%s" % protobuf_include,
"--grpc-cpp_out=%s" % cpp_out,
proto_src_loc,
]),
"cd $${g3}",
"cp %s.grpc.pb.h %s" % (temp_stem, _loc(gen_grpc_pb_h)),
"cp %s.grpc.pb.cc %s" % (temp_stem, _loc(gen_grpc_pb_cc)),
]
grpc_pb_outs = [gen_grpc_pb_h, gen_grpc_pb_cc]
native.genrule(
name = src_arch + ".grpc.pb",
srcs = protoc_srcs_set,
outs = grpc_pb_outs,
tools = grpc_tools,
cmd = " && ".join(cmds),
heuristic_label_expansion = 0,
visibility = visibility,
)
dep_set = depset(deps) | [protobuf_label]
includes = []
if proto_include:
includes = [proto_include]
# Note: Public sc_proto_lib invokes this once per (listed) arch;
# which then calls sc_cc_lib with same name for each arch;
# multiple such calls are OK as long as the arches are disjoint.
sc_cc_lib(
name = decorate(name, arch),
deps = dep_set,
srcs = gen_srcs,
hdrs = hdrs + gen_hdrs,
arches = [arch],
copts = [],
includes = includes,
testonly = testonly,
textual_hdrs = gen_hdrs,
visibility = visibility,
)
if grpc_shim_rule:
grpc_name = name[:-6] + "_grpc_proto"
grpc_dep_set = dep_set | [name] | _SC_GRPC_DEPS
grpc_gen_hdrs_plus = grpc_gen_hdrs + gen_hdrs
sc_cc_lib(
name = decorate(grpc_name, arch),
deps = grpc_dep_set,
srcs = grpc_gen_srcs,
hdrs = hdrs + grpc_gen_hdrs_plus + [grpc_shim_rule],
arches = [arch],
copts = [],
includes = includes,
testonly = testonly,
textual_hdrs = grpc_gen_hdrs_plus,
visibility = visibility,
)
def _gen_proto_shims(name, pb_modifier, srcs, arches, visibility):
"""Macro to build .pb.h multi-arch master switch for sc_proto_lib.
For each src path.proto, generates path.pb.h consisting of:
#ifdef logic to select path.${arch}.pb.h
Also generates an alias that will select the appropriate proto target
based on the currently selected platform architecture.
Args:
name: Base name for this library.
pb_modifier: protoc plugin-dependent file extension (e.g.: .pb)
srcs: List of proto files.
arches: List of arches this shim should support.
visibility: The blaze visibility of the generated alias.
Returns:
Name of shim rule for use in follow-on hdrs and/or src lists.
"""
outs = []
cmds = []
hdr_ext = pb_modifier + ".h"
for src in srcs:
pkg, filename = parse_label(src)
if not filename.endswith(".proto"):
continue
hdr_stem = filename[0:-6]
new_hdr_name = hdr_stem + hdr_ext
outs.append(new_hdr_name)
# Generate lines for shim switch file.
# Lines expand inside squotes, so quote accordingly.
include_fmt = "#include " + dquote(pkg + "/" + hdr_stem + ".%s" + hdr_ext)
lines = [
"#if defined(STRATUM_ARCH_%s)" % "PPC",
include_fmt % "ppc",
"#elif defined(STRATUM_ARCH_%s)" % "X86",
include_fmt % "x86",
"#elif defined(STRATUM_ARCH_%s)" % "HOST",
include_fmt % "host",
"#else",
"#error Unknown STRATUM_ARCH",
"#endif",
]
gen_cmds = [("printf '%%s\\n' '%s'" % line) for line in lines]
new_hdr_loc = "$(location %s)" % new_hdr_name
cmds.append("{ %s; } > %s" % (" && ".join(gen_cmds), new_hdr_loc))
shim_rule = decorate(name, "shims")
native.genrule(
name = shim_rule,
srcs = srcs,
outs = outs,
cmd = " && ".join(cmds) or "true",
)
sc_platform_alias(
name = name,
host = decorate(name, "host") if "host" in arches else None,
ppc = decorate(name, "ppc") if "ppc" in arches else None,
x86 = decorate(name, "x86") if "x86" in arches else None,
visibility = visibility,
)
return shim_rule
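# For reference, the shim generated for a foo.proto in package some/pkg looks
# roughly like the following (reconstructed from the printf lines above; the
# package and file names are placeholders):
#
#   #if defined(STRATUM_ARCH_PPC)
#   #include "some/pkg/foo.ppc.pb.h"
#   #elif defined(STRATUM_ARCH_X86)
#   #include "some/pkg/foo.x86.pb.h"
#   #elif defined(STRATUM_ARCH_HOST)
#   #include "some/pkg/foo.host.pb.h"
#   #else
#   #error Unknown STRATUM_ARCH
#   #endif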
def _gen_py_proto_lib(name, srcs, deps, visibility, testonly):
"""Creates a py_proto_library from the given srcs.
There's no clean way to make python protos work with sc_proto_lib's
proto_include field, so we keep this simple.
For library "name", generates:
* ${name}_default_pb, a regular proto library.
* ${name}_py, a py_proto_library based on ${name}_default_pb.
Args:
name: Standard blaze name argument.
srcs: Standard blaze srcs argument.
deps: Standard blaze deps argument.
visibility: Standard blaze visibility argument.
testonly: Standard blaze testonly argument.
"""
regular_proto_name = decorate(name, "default_pb")
py_name = decorate(name, "py")
proto_library(
name = regular_proto_name,
srcs = srcs,
deps = [decorate(dep, "default_pb") for dep in deps],
visibility = visibility,
testonly = testonly,
)
native.py_proto_library(
name = py_name,
api_version = 2,
deps = [regular_proto_name],
visibility = visibility,
testonly = testonly,
)
# TODO(unknown): Add support for depending on normal proto_library rules.
def sc_proto_lib(
name = None,
srcs = [],
hdrs = [],
deps = [],
arches = [],
visibility = None,
testonly = None,
proto_include = None,
python_support = False,
services = []):
"""Public macro to build multi-arch library from Message protobuf(s).
For library "name", generates:
* ${name}_shim aka .pb.h master switch - see _gen_proto_shims, above.
* ${name}_${arch}_pb protobuf compile rules - one for each arch.
* sc_cc_lib(name) with those as input.
* ${name}_py a py_proto_library version of this library. Only generated
if python_support == True.
Args:
name: Base name for this library.
srcs: List of .proto files - private to this library.
hdrs: As above, but also exported for dependent rules to utilize.
deps: List of deps for this library
arches: Which architectures to build this library for, None => ALL.
visibility: Standard blaze visibility parameter, passed through to
subsequent rules.
testonly: Standard blaze testonly parameter.
proto_include: Path to add to include path. This will affect the
symbols generated by protoc, as well as the include
paths used for both sc_cc_lib and sc_proto_lib rules
that depend on this rule. Typically "."
python_support: Defaults to False. If True, generate a python proto library
from this rule. Any sc_proto_lib with python support may
only depend on sc_proto_libs that also have python support,
and may not use the proto_include field in this rule.
services: List of services to enable {"grpc", "rpc"};
Only "grpc" is supported. So "rpc" and "grpc" are equivalent.
"""
if not arches:
if testonly:
arches = HOST_ARCHES
else:
arches = ALL_ARCHES
service_enable = {
"grpc": 0,
}
for service in services or []:
if service == "grpc":
service_enable["grpc"] = 1
elif service == "rpc":
service_enable["grpc"] = 1
else:
fail("service='%s' not in (grpc, rpc)" % service)
deps = depset(deps or [])
shim_rule = _gen_proto_shims(
name = name,
pb_modifier = ".pb",
srcs = srcs + hdrs,
arches = arches,
visibility = visibility,
)
grpc_shim_rule = None
if (service_enable["grpc"]):
grpc_shim_rule = _gen_proto_shims(
name = decorate(name[:-6], "grpc_proto"),
pb_modifier = ".grpc.pb",
srcs = srcs + hdrs,
arches = arches,
visibility = visibility,
)
for arch in arches:
_gen_proto_lib(
name = name,
srcs = srcs,
hdrs = [shim_rule] + hdrs,
deps = deps,
arch = arch,
visibility = visibility,
testonly = testonly,
proto_include = proto_include,
grpc_shim_rule = grpc_shim_rule,
)
if python_support:
if proto_include:
fail("Cannot use proto_include on an sc_proto_lib with python support.")
_gen_py_proto_lib(
name = name,
srcs = depset(srcs + hdrs),
deps = deps,
visibility = visibility,
testonly = testonly,
)
register_extension_info(
extension_name = "sc_proto_lib",
label_regex_for_dep = "{extension_name}",
)
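# A hypothetical invocation for a foo.proto that defines a gRPC service:
#
#   sc_proto_lib(
#       name = "foo_proto",
#       hdrs = ["foo.proto"],
#       deps = [":bar_proto"],
#       services = ["grpc"],
#       python_support = True,
#   )
#
# This yields roughly: per-arch C++ libs behind the :foo_proto platform alias,
# a :foo_grpc_proto library for the generated service stubs, and a python
# proto library decorated with "py" for python callers.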
def sc_package(
name = None,
bins = None,
data = None,
deps = None,
arches = None,
visibility = None):
"""Public macro to package binaries and data for deployment.
For package "name", generates:
* ${name}_${arch}_bin and ${name}_${arch}_data filesets containing
respectively all of the binaries and all of the data needed for this
package and all dependency packages.
* ${name}_${arch} fileset containing the corresponding bin and data
filesets, mapped to bin/ and share/ respectively.
* ${name}_${arch}_tarball rule builds that .tar.gz package.
Args:
name: Base name for this package.
bins: List of sc_cc_bin rules to be packaged.
data: List of files (and file producing rules) to be packaged.
deps: List of other sc_packages to add to this package.
arches: Which architectures to build this library for,
None => EMBEDDED_ARCHES (HOST_ARCHES not generally supported).
visibility: Standard blaze visibility parameter, passed through to
all filesets.
"""
bins = depset(bins or [])
data = depset(data or [])
deps = depset(deps or [])
if not arches:
arches = EMBEDDED_ARCHES
fileset_name = decorate(name, "fs")
for extension, inputs in [
("bin", ["%s.stripped" % b for b in bins.to_list()]),
("data", data),
]:
native.Fileset(
name = decorate(fileset_name, extension),
out = decorate(name, extension),
entries = [
native.FilesetEntry(
files = inputs,
),
] + [
native.FilesetEntry(srcdir = decorate(dep, extension))
for dep in deps.to_list()
],
visibility = visibility,
)
# Add any platform specific files to the final tarball.
platform_entries = sc_platform_select(
# We use a different ppc toolchain for Stratum.
# This means that we must provide portable shared libs for our ppc
# executables.
ppc = [native.FilesetEntry(
srcdir = "%s:BUILD" % _PPC_GRTE,
files = [":libs"],
destdir = "lib/stratum",
symlinks = "dereference",
)],
default = [],
)
native.Fileset(
name = fileset_name,
out = name,
entries = [
native.FilesetEntry(
srcdir = decorate(name, "bin"),
destdir = "bin",
),
native.FilesetEntry(
srcdir = decorate(name, "data"),
destdir = "share",
),
] + platform_entries,
visibility = visibility,
)
outs = ["%s.tar.gz" % name]
# Copy our files into a temporary directory and make any necessary changes
# before tarballing.
cmds = [
"TEMP_DIR=$(@D)/stratum_packaging_temp",
"mkdir $${TEMP_DIR}",
"cp -r %s $${TEMP_DIR}/tarball" % _loc(fileset_name),
"if [[ -e $${TEMP_DIR}/tarball/bin ]]",
"then for f in $${TEMP_DIR}/tarball/bin/*.stripped",
" do mv $${f} $${f%.stripped}", # rename not available.
"done",
"fi",
"tar czf %s -h -C $${TEMP_DIR}/tarball ." % _loc(name + ".tar.gz"),
"rm -rf $${TEMP_DIR}",
]
native.genrule(
name = decorate(name, "tarball"),
srcs = [":%s" % fileset_name],
outs = outs,
cmd = "; ".join(cmds),
visibility = visibility,
)
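# Sketch of a deployment package built from the rules above (names assumed):
#
#   sc_package(
#       name = "stratum_hal",
#       bins = [":stratum_stub"],
#       data = ["stratum_hal.conf"],
#       arches = EMBEDDED_ARCHES,
#   )
#
# produces stratum_hal.tar.gz with stripped binaries under bin/, data under
# share/, and (for ppc builds) the cross-toolchain shared libs under lib/stratum.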
|
updatetranslations.py | erincerys/ergo | 1,122 | 2883 | #!/usr/bin/env python3
# updatetranslations.py
#
# tl;dr this script updates our translation file with the newest, coolest strings we've added!
# it manually searches the source code, extracts strings and then updates the language files.
# Written in 2018 by <NAME> <<EMAIL>>
#
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty.
#
# You should have received a copy of the CC0 Public Domain Dedication along
# with this software. If not, see
# <http://creativecommons.org/publicdomain/zero/1.0/>.
"""updatetranslations.py
Usage:
updatetranslations.py run <irc-dir> <languages-dir>
updatetranslations.py --version
updatetranslations.py (-h | --help)
Options:
<irc-dir> Oragono's irc subdirectory where the Go code is kept.
<languages-dir> Languages directory."""
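# Illustrative example of what the extraction below produces (made-up string):
# a call such as client.t("Welcome to %s") in the Go sources becomes the entry
#   "Welcome to %s": "Welcome to %s"
# in <languages-dir>/example/irc.lang.json, ready to be translated per language.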
import os
import re
import json
from docopt import docopt
import yaml
ignored_strings = [
'none', 'saset'
]
if __name__ == '__main__':
arguments = docopt(__doc__, version="0.1.0")
if arguments['run']:
# general IRC strings
irc_strings = []
for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
for fname in files:
filepath = subdir + os.sep + fname
if filepath.endswith('.go'):
content = open(filepath, 'r', encoding='UTF-8').read()
matches = re.findall(r'\.t\("((?:[^"]|\\")+)"\)', content)
for match in matches:
if match not in irc_strings:
irc_strings.append(match)
matches = re.findall(r'\.t\(\`([^\`]+)\`\)', content)
for match in matches:
if match not in irc_strings:
irc_strings.append(match)
for s in ignored_strings:
try:
irc_strings.remove(s)
except ValueError:
# ignore any that don't exist
...
print("irc strings:", len(irc_strings))
with open(os.path.join(arguments['<languages-dir>'], 'example', 'irc.lang.json'), 'w') as f:
f.write(json.dumps({k:k for k in irc_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
f.write('\n')
for string in irc_strings:
if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
print(' confirm:', string)
# help entries
help_strings = []
for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
for fname in files:
filepath = subdir + os.sep + fname
if fname == 'help.go':
content = open(filepath, 'r', encoding='UTF-8').read()
matches = re.findall(r'\`([^\`]+)\`', content)
for match in matches:
if '\n' in match and match not in help_strings:
help_strings.append(match)
for s in ignored_strings:
try:
help_strings.remove(s)
except ValueError:
# ignore any that don't exist
...
print("help strings:", len(help_strings))
with open(os.path.join(arguments['<languages-dir>'], 'example', 'help.lang.json'), 'w') as f:
f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
f.write('\n')
for string in help_strings:
if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
print(' confirm:', string.split('\n')[0])
# nickserv help entries
help_strings = []
for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
for fname in files:
filepath = subdir + os.sep + fname
if fname == 'nickserv.go':
content = open(filepath, 'r', encoding='UTF-8').read()
matches = re.findall(r'\`([^\`]+)\`', content)
for match in matches:
if match not in help_strings:
help_strings.append(match)
for s in ignored_strings:
try:
help_strings.remove(s)
except ValueError:
# ignore any that don't exist
...
print("nickserv help strings:", len(help_strings))
with open(os.path.join(arguments['<languages-dir>'], 'example', 'nickserv.lang.json'), 'w') as f:
f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
f.write('\n')
for string in help_strings:
if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
print(' confirm:', string)
# chanserv help entries
help_strings = []
for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
for fname in files:
filepath = subdir + os.sep + fname
if fname == 'chanserv.go':
content = open(filepath, 'r', encoding='UTF-8').read()
matches = re.findall(r'\`([^\`]+)\`', content)
for match in matches:
if match not in help_strings:
help_strings.append(match)
for s in ignored_strings:
try:
help_strings.remove(s)
except ValueError:
# ignore any that don't exist
...
print("chanserv help strings:", len(help_strings))
with open(os.path.join(arguments['<languages-dir>'], 'example', 'chanserv.lang.json'), 'w') as f:
f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
f.write('\n')
for string in help_strings:
if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
print(' confirm:', string)
# hostserv help entries
help_strings = []
for subdir, dirs, files in os.walk(arguments['<irc-dir>']):
for fname in files:
filepath = subdir + os.sep + fname
if fname == 'hostserv.go':
content = open(filepath, 'r', encoding='UTF-8').read()
matches = re.findall(r'\`([^\`]+)\`', content)
for match in matches:
if match not in help_strings:
help_strings.append(match)
for s in ignored_strings:
try:
help_strings.remove(s)
except ValueError:
# ignore any that don't exist
...
print("hostserv help strings:", len(help_strings))
with open(os.path.join(arguments['<languages-dir>'], 'example', 'hostserv.lang.json'), 'w') as f:
f.write(json.dumps({k:k for k in help_strings}, sort_keys=True, indent=2, separators=(',', ': ')))
f.write('\n')
for string in help_strings:
if 1 < string.count('%s') + string.count('%d') + string.count('%f'):
print(' confirm:', string)
|
questions/53349623/main.py | sesu089/stackoverflow | 302 | 2893 | import sys
from PyQt5 import QtCore, QtGui, QtWidgets
class Demo(QtWidgets.QWidget):
def __init__(self):
super(Demo, self).__init__()
self.button = QtWidgets.QPushButton()
self.label = QtWidgets.QLabel(alignment=QtCore.Qt.AlignCenter)
self.combo = QtWidgets.QComboBox(self)
self.combo.currentIndexChanged.connect(self.change_func)
self.trans = QtCore.QTranslator(self)
self.v_layout = QtWidgets.QVBoxLayout(self)
self.v_layout.addWidget(self.combo)
self.v_layout.addWidget(self.button)
self.v_layout.addWidget(self.label)
options = ([('English', ''), ('français', 'eng-fr' ), ('中文', 'eng-chs'), ])
for i, (text, lang) in enumerate(options):
self.combo.addItem(text)
self.combo.setItemData(i, lang)
self.retranslateUi()
@QtCore.pyqtSlot(int)
def change_func(self, index):
data = self.combo.itemData(index)
if data:
self.trans.load(data)
QtWidgets.QApplication.instance().installTranslator(self.trans)
else:
QtWidgets.QApplication.instance().removeTranslator(self.trans)
def changeEvent(self, event):
if event.type() == QtCore.QEvent.LanguageChange:
self.retranslateUi()
super(Demo, self).changeEvent(event)
def retranslateUi(self):
self.button.setText(QtWidgets.QApplication.translate('Demo', 'Start'))
self.label.setText(QtWidgets.QApplication.translate('Demo', 'Hello, World'))
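# Note: 'eng-fr' / 'eng-chs' above name Qt translation files (eng-fr.qm,
# eng-chs.qm) expected next to the script; QTranslator.load() tries the .qm
# suffix when it is omitted. A typical (assumed) way to produce them:
#   pylupdate5 main.py -ts eng-fr.ts   # extract translatable strings
#   lrelease eng-fr.ts                 # compile to eng-fr.qm after translating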
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_()) |
generator/generator.py | GregorKikelj/opendbc | 1,059 | 2917 | #!/usr/bin/env python3
import os
import re
cur_path = os.path.dirname(os.path.realpath(__file__))
opendbc_root = os.path.join(cur_path, '../')
include_pattern = re.compile(r'CM_ "IMPORT (.*?)";')
def read_dbc(src_dir, filename):
with open(os.path.join(src_dir, filename)) as file_in:
return file_in.read()
def create_dbc(src_dir, filename, output_path):
dbc_file_in = read_dbc(src_dir, filename)
includes = include_pattern.findall(dbc_file_in)
output_filename = filename.replace('.dbc', '_generated.dbc')
output_file_location = os.path.join(output_path, output_filename)
with open(output_file_location, 'w') as dbc_file_out:
dbc_file_out.write('CM_ "AUTOGENERATED FILE, DO NOT EDIT";\n')
for include_filename in includes:
include_file_header = '\n\nCM_ "Imported file %s starts here";\n' % include_filename
dbc_file_out.write(include_file_header)
include_file = read_dbc(src_dir, include_filename)
dbc_file_out.write(include_file)
dbc_file_out.write('\nCM_ "%s starts here";\n' % filename)
core_dbc = include_pattern.sub('', dbc_file_in)
dbc_file_out.write(core_dbc)
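# Illustrative input/output (the imported file name is made up): a source DBC
# containing the line
#   CM_ "IMPORT _honda_common.dbc";
# is written out as <name>_generated.dbc with that comment replaced by the full
# text of _honda_common.dbc, prefixed by the AUTOGENERATED banner above.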
def create_all(output_path):
for src_dir, _, filenames in os.walk(cur_path):
if src_dir == cur_path:
continue
#print(src_dir)
for filename in filenames:
if filename.startswith('_') or not filename.endswith('.dbc'):
continue
#print(filename)
create_dbc(src_dir, filename, output_path)
if __name__ == "__main__":
create_all(opendbc_root)
|
Python/hello_world-theopaid.py | saurabhcommand/Hello-world | 1,428 | 2932 | #Author <NAME>
print("Hello World")
hello_list = ["Hello World"]
print(hello_list[0])
for i in hello_list:
print(i) |
combo/search/discrete/policy.py | zhangkunliang/BayesOptimization | 139 | 2943 | import numpy as np
import copy
import combo.misc
import cPickle as pickle
from results import history
from .. import utility
from ...variable import variable
from ..call_simulator import call_simulator
from ... import predictor
from ...gp import predictor as gp_predictor
from ...blm import predictor as blm_predictor
import combo.search.score
MAX_SEACH = int(20000)
class policy:
def __init__(self, test_X, config=None):
self.predictor = None
self.training = variable()
self.test = self._set_test(test_X)
self.actions = np.arange(0, self.test.X.shape[0])
self.history = history()
self.config = self._set_config(config)
def set_seed(self, seed):
self.seed = seed
np.random.seed(self.seed)
def delete_actions(self, index, actions=None):
actions = self._set_unchosed_actions(actions)
return np.delete(actions, index)
def write(self, action, t, X=None):
if X is None:
X = self.test.X[action, :]
Z = self.test.Z[action, :] if self.test.Z is not None else None
else:
Z = self.predictor.get_basis(X) \
if self.predictor is not None else None
self.new_data = variable(X, t, Z)
self.history.write(t, action)
self.training.add(X=X, t=t, Z=Z)
def random_search(self, max_num_probes, num_search_each_probe=1,
simulator=None, is_disp=True):
N = int(num_search_each_probe)
if int(max_num_probes) * N > len(self.actions):
raise ValueError('max_num_probes * num_search_each_probe must \
be smaller than the length of candidates')
if is_disp:
utility.show_interactive_mode(simulator, self.history)
for n in xrange(0, max_num_probes):
if is_disp and N > 1:
utility.show_start_message_multi_search(self.history.num_runs)
action = self.get_random_action(N)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def bayes_search(self, training=None, max_num_probes=None,
num_search_each_probe=1,
predictor=None, is_disp=True,
simulator=None, score='TS', interval=0,
num_rand_basis=0):
if max_num_probes is None:
max_num_probes = 1
simulator = None
is_rand_expans = False if num_rand_basis == 0 else True
self.training = self._set_training(training)
if predictor is None:
self.predictor = self._init_predictor(is_rand_expans)
else:
self.predictor = predictor
N = int(num_search_each_probe)
for n in xrange(max_num_probes):
if utility.is_learning(n, interval):
self.predictor.fit(self.training, num_rand_basis)
self.test.Z = self.predictor.get_basis(self.test.X)
self.training.Z = self.predictor.get_basis(self.training.X)
self.predictor.prepare(self.training)
else:
try:
self.predictor.update(self.training, self.new_data)
except:
self.predictor.prepare(self.training)
if num_search_each_probe != 1:
utility.show_start_message_multi_search(self.history.num_runs,
score)
K = self.config.search.multi_probe_num_sampling
alpha = self.config.search.alpha
action = self.get_actions(score, N, K, alpha)
if simulator is None:
return action
t, X = call_simulator(simulator, action)
self.write(action, t, X)
if is_disp:
utility.show_search_results(self.history, N)
return copy.deepcopy(self.history)
def get_score(self, mode, predictor=None, training=None, alpha=1):
self._set_training(training)
self._set_predictor(predictor)
actions = self.actions
test = self.test.get_subset(actions)
if mode == 'EI':
f = combo.search.score.EI(predictor, training, test)
elif mode == 'PI':
f = combo.search.score.PI(predictor, training, test)
elif mode == 'TS':
f = combo.search.score.TS(predictor, training, test, alpha)
else:
raise NotImplementedError('mode must be EI, PI or TS.')
return f
def get_marginal_score(self, mode, chosed_actions, N, alpha):
f = np.zeros((N, len(self.actions)))
new_test = self.test.get_subset(chosed_actions)
virtual_t \
= self.predictor.get_predict_samples(self.training, new_test, N)
for n in xrange(N):
predictor = copy.deepcopy(self.predictor)
train = copy.deepcopy(self.training)
virtual_train = new_test
virtual_train.t = virtual_t[n, :]
if virtual_train.Z is None:
train.add(virtual_train.X, virtual_train.t)
else:
train.add(virtual_train.X, virtual_train.t, virtual_train.Z)
try:
predictor.update(train, virtual_train)
except:
predictor.prepare(train)
f[n, :] = self.get_score(mode, predictor, train)
return f
def get_actions(self, mode, N, K, alpha):
f = self.get_score(mode, self.predictor, self.training, alpha)
temp = np.argmax(f)
action = self.actions[temp]
self.actions = self.delete_actions(temp)
chosed_actions = np.zeros(N, dtype=int)
chosed_actions[0] = action
for n in xrange(1, N):
f = self.get_marginal_score(mode, chosed_actions[0:n], K, alpha)
temp = np.argmax(np.mean(f, 0))
chosed_actions[n] = self.actions[temp]
self.actions = self.delete_actions(temp)
return chosed_actions
def get_random_action(self, N):
random_index = np.random.permutation(xrange(self.actions.shape[0]))
index = random_index[0:N]
action = self.actions[index]
self.actions = self.delete_actions(index)
return action
def load(self, file_history, file_training=None, file_predictor=None):
self.history.load(file_history)
if file_training is None:
N = self.history.total_num_search
X = self.test.X[self.history.chosed_actions[0:N], :]
t = self.history.fx[0:N]
self.training = variable(X=X, t=t)
else:
self.training = variable()
self.training.load(file_training)
if file_predictor is not None:
with open(file_predictor) as f:
self.predictor = pickle.load(f)
def export_predictor(self):
return self.predictor
def export_training(self):
return self.training
def export_history(self):
return self.history
def _set_predictor(self, predictor=None):
if predictor is None:
predictor = self.predictor
return predictor
def _init_predictor(self, is_rand_expans, predictor=None):
self.predictor = self._set_predictor(predictor)
if self.predictor is None:
if is_rand_expans:
self.predictor = blm_predictor(self.config)
else:
self.predictor = gp_predictor(self.config)
return self.predictor
def _set_training(self, training=None):
if training is None:
training = self.training
return training
def _set_unchosed_actions(self, actions=None):
if actions is None:
actions = self.actions
return actions
def _set_test(self, test_X):
if isinstance(test_X, np.ndarray):
test = variable(X=test_X)
elif isinstance(test_X, variable):
test = test_X
else:
raise TypeError('The type of test_X must \
take ndarray or combo.variable')
return test
def _set_config(self, config=None):
if config is None:
config = combo.misc.set_config()
return config
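# Minimal usage sketch (test_X and simulator are assumed user-supplied objects;
# the API is Python 2 era, matching the cPickle/xrange usage above):
#
#   p = policy(test_X=test_X)   # test_X: ndarray of candidate descriptors
#   p.set_seed(0)
#   p.random_search(max_num_probes=20, simulator=simulator)
#   res = p.bayes_search(max_num_probes=80, simulator=simulator,
#                        score='TS', interval=20, num_rand_basis=5000)
#   observed = res.fx[0:res.total_num_search]   # objective values seen so far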
|
venv/lib/python3.9/site-packages/py2app/bootstrap/disable_linecache.py | dequeb/asmbattle | 193 | 2944 | def _disable_linecache():
import linecache
def fake_getline(*args, **kwargs):
return ""
linecache.orig_getline = linecache.getline
linecache.getline = fake_getline
_disable_linecache()
|
test/com/facebook/buck/skylark/parser/testdata/rule_with_wrong_types/attr_value_type/subdir/foo.bzl | Unknoob/buck | 8,027 | 2977 | """ Module docstring """
def _impl(_ctx):
""" Function docstring """
pass
some_rule = rule(
attrs = {
"attr1": attr.int(
default = 2,
mandatory = False,
),
"attr2": 5,
},
implementation = _impl,
)
|
regtests/calling/function_expression.py | bpmbank/PythonJS | 319 | 2984 | """func expr"""
F = function( x,y ):
return x+y
def main():
TestError( F(1,2) == 3 )
|
src/fabricflow/fibc/api/fibcapis_pb2_grpc.py | RudSmith/beluganos | 119 | 3017 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import fibcapi_pb2 as fibcapi__pb2
import fibcapis_pb2 as fibcapis__pb2
class FIBCApApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCApApi/Monitor',
request_serializer=fibcapis__pb2.ApMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.ApMonitorReply.FromString,
)
self.GetPortStats = channel.unary_stream(
'/fibcapi.FIBCApApi/GetPortStats',
request_serializer=fibcapis__pb2.ApGetPortStatsRequest.SerializeToString,
response_deserializer=fibcapi__pb2.FFPortStats.FromString,
)
self.ModPortStats = channel.unary_unary(
'/fibcapi.FIBCApApi/ModPortStats',
request_serializer=fibcapis__pb2.ApModPortStatsRequest.SerializeToString,
response_deserializer=fibcapis__pb2.ApModPortStatsReply.FromString,
)
self.GetPortEntries = channel.unary_stream(
'/fibcapi.FIBCApApi/GetPortEntries',
request_serializer=fibcapis__pb2.ApGetPortEntriesRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DbPortEntry.FromString,
)
self.GetIDEntries = channel.unary_stream(
'/fibcapi.FIBCApApi/GetIDEntries',
request_serializer=fibcapis__pb2.ApGetIdEntriesRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DbIdEntry.FromString,
)
self.GetDpEntries = channel.unary_stream(
'/fibcapi.FIBCApApi/GetDpEntries',
request_serializer=fibcapis__pb2.ApGetDpEntriesRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DbDpEntry.FromString,
)
self.AddPortEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/AddPortEntry',
request_serializer=fibcapis__pb2.DbPortEntry.SerializeToString,
response_deserializer=fibcapis__pb2.ApAddPortEntryReply.FromString,
)
self.AddIDEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/AddIDEntry',
request_serializer=fibcapis__pb2.DbIdEntry.SerializeToString,
response_deserializer=fibcapis__pb2.ApAddIdEntryReply.FromString,
)
self.DelPortEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/DelPortEntry',
request_serializer=fibcapis__pb2.DbPortKey.SerializeToString,
response_deserializer=fibcapis__pb2.ApDelPortEntryReply.FromString,
)
self.DelIDEntry = channel.unary_unary(
'/fibcapi.FIBCApApi/DelIDEntry',
request_serializer=fibcapis__pb2.DbIdEntry.SerializeToString,
response_deserializer=fibcapis__pb2.ApDelIdEntryReply.FromString,
)
self.GetStats = channel.unary_stream(
'/fibcapi.FIBCApApi/GetStats',
request_serializer=fibcapis__pb2.ApGetStatsRequest.SerializeToString,
response_deserializer=fibcapis__pb2.StatsEntry.FromString,
)
self.RunOAM = channel.unary_unary(
'/fibcapi.FIBCApApi/RunOAM',
request_serializer=fibcapi__pb2.OAM.Request.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
class FIBCApApiServicer(object):
# missing associated documentation comment in .proto file
pass
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPortStats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ModPortStats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPortEntries(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIDEntries(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetDpEntries(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddPortEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddIDEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DelPortEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DelIDEntry(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStats(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RunOAM(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCApApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.ApMonitorRequest.FromString,
response_serializer=fibcapis__pb2.ApMonitorReply.SerializeToString,
),
'GetPortStats': grpc.unary_stream_rpc_method_handler(
servicer.GetPortStats,
request_deserializer=fibcapis__pb2.ApGetPortStatsRequest.FromString,
response_serializer=fibcapi__pb2.FFPortStats.SerializeToString,
),
'ModPortStats': grpc.unary_unary_rpc_method_handler(
servicer.ModPortStats,
request_deserializer=fibcapis__pb2.ApModPortStatsRequest.FromString,
response_serializer=fibcapis__pb2.ApModPortStatsReply.SerializeToString,
),
'GetPortEntries': grpc.unary_stream_rpc_method_handler(
servicer.GetPortEntries,
request_deserializer=fibcapis__pb2.ApGetPortEntriesRequest.FromString,
response_serializer=fibcapis__pb2.DbPortEntry.SerializeToString,
),
'GetIDEntries': grpc.unary_stream_rpc_method_handler(
servicer.GetIDEntries,
request_deserializer=fibcapis__pb2.ApGetIdEntriesRequest.FromString,
response_serializer=fibcapis__pb2.DbIdEntry.SerializeToString,
),
'GetDpEntries': grpc.unary_stream_rpc_method_handler(
servicer.GetDpEntries,
request_deserializer=fibcapis__pb2.ApGetDpEntriesRequest.FromString,
response_serializer=fibcapis__pb2.DbDpEntry.SerializeToString,
),
'AddPortEntry': grpc.unary_unary_rpc_method_handler(
servicer.AddPortEntry,
request_deserializer=fibcapis__pb2.DbPortEntry.FromString,
response_serializer=fibcapis__pb2.ApAddPortEntryReply.SerializeToString,
),
'AddIDEntry': grpc.unary_unary_rpc_method_handler(
servicer.AddIDEntry,
request_deserializer=fibcapis__pb2.DbIdEntry.FromString,
response_serializer=fibcapis__pb2.ApAddIdEntryReply.SerializeToString,
),
'DelPortEntry': grpc.unary_unary_rpc_method_handler(
servicer.DelPortEntry,
request_deserializer=fibcapis__pb2.DbPortKey.FromString,
response_serializer=fibcapis__pb2.ApDelPortEntryReply.SerializeToString,
),
'DelIDEntry': grpc.unary_unary_rpc_method_handler(
servicer.DelIDEntry,
request_deserializer=fibcapis__pb2.DbIdEntry.FromString,
response_serializer=fibcapis__pb2.ApDelIdEntryReply.SerializeToString,
),
'GetStats': grpc.unary_stream_rpc_method_handler(
servicer.GetStats,
request_deserializer=fibcapis__pb2.ApGetStatsRequest.FromString,
response_serializer=fibcapis__pb2.StatsEntry.SerializeToString,
),
'RunOAM': grpc.unary_unary_rpc_method_handler(
servicer.RunOAM,
request_deserializer=fibcapi__pb2.OAM.Request.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCApApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
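# Client-side usage sketch (the endpoint is an assumption; the stub API itself
# is the standard grpc-python pattern for this generated module):
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = FIBCApApiStub(channel)
#   for reply in stub.Monitor(fibcapis__pb2.ApMonitorRequest()):
#       ...  # each reply is a streamed fibcapis_pb2.ApMonitorReply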
class FIBCVmApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendHello = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendHello',
request_serializer=fibcapi__pb2.Hello.SerializeToString,
response_deserializer=fibcapis__pb2.HelloReply.FromString,
)
self.SendPortConfig = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendPortConfig',
request_serializer=fibcapi__pb2.PortConfig.SerializeToString,
response_deserializer=fibcapis__pb2.PortConfigReply.FromString,
)
self.SendFlowMod = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendFlowMod',
request_serializer=fibcapi__pb2.FlowMod.SerializeToString,
response_deserializer=fibcapis__pb2.FlowModReply.FromString,
)
self.SendGroupMod = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendGroupMod',
request_serializer=fibcapi__pb2.GroupMod.SerializeToString,
response_deserializer=fibcapis__pb2.GroupModReply.FromString,
)
self.SendOAMReply = channel.unary_unary(
'/fibcapi.FIBCVmApi/SendOAMReply',
request_serializer=fibcapis__pb2.OAMReply.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCVmApi/Monitor',
request_serializer=fibcapis__pb2.VmMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.VmMonitorReply.FromString,
)
class FIBCVmApiServicer(object):
# missing associated documentation comment in .proto file
pass
def SendHello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPortConfig(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendFlowMod(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendGroupMod(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendOAMReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCVmApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendHello': grpc.unary_unary_rpc_method_handler(
servicer.SendHello,
request_deserializer=fibcapi__pb2.Hello.FromString,
response_serializer=fibcapis__pb2.HelloReply.SerializeToString,
),
'SendPortConfig': grpc.unary_unary_rpc_method_handler(
servicer.SendPortConfig,
request_deserializer=fibcapi__pb2.PortConfig.FromString,
response_serializer=fibcapis__pb2.PortConfigReply.SerializeToString,
),
'SendFlowMod': grpc.unary_unary_rpc_method_handler(
servicer.SendFlowMod,
request_deserializer=fibcapi__pb2.FlowMod.FromString,
response_serializer=fibcapis__pb2.FlowModReply.SerializeToString,
),
'SendGroupMod': grpc.unary_unary_rpc_method_handler(
servicer.SendGroupMod,
request_deserializer=fibcapi__pb2.GroupMod.FromString,
response_serializer=fibcapis__pb2.GroupModReply.SerializeToString,
),
'SendOAMReply': grpc.unary_unary_rpc_method_handler(
servicer.SendOAMReply,
request_deserializer=fibcapis__pb2.OAMReply.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.VmMonitorRequest.FromString,
response_serializer=fibcapis__pb2.VmMonitorReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCVmApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class FIBCVsApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendHello = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendHello',
request_serializer=fibcapi__pb2.FFHello.SerializeToString,
response_deserializer=fibcapis__pb2.FFHelloReply.FromString,
)
self.SendFFPacket = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendFFPacket',
request_serializer=fibcapi__pb2.FFPacket.SerializeToString,
response_deserializer=fibcapis__pb2.FFPacketReply.FromString,
)
self.SendPacketIn = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendPacketIn',
request_serializer=fibcapi__pb2.FFPacketIn.SerializeToString,
response_deserializer=fibcapis__pb2.FFPacketInReply.FromString,
)
self.SendOAMReply = channel.unary_unary(
'/fibcapi.FIBCVsApi/SendOAMReply',
request_serializer=fibcapis__pb2.OAMReply.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCVsApi/Monitor',
request_serializer=fibcapis__pb2.VsMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.VsMonitorReply.FromString,
)
class FIBCVsApiServicer(object):
# missing associated documentation comment in .proto file
pass
def SendHello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendFFPacket(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPacketIn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendOAMReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCVsApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendHello': grpc.unary_unary_rpc_method_handler(
servicer.SendHello,
request_deserializer=fibcapi__pb2.FFHello.FromString,
response_serializer=fibcapis__pb2.FFHelloReply.SerializeToString,
),
'SendFFPacket': grpc.unary_unary_rpc_method_handler(
servicer.SendFFPacket,
request_deserializer=fibcapi__pb2.FFPacket.FromString,
response_serializer=fibcapis__pb2.FFPacketReply.SerializeToString,
),
'SendPacketIn': grpc.unary_unary_rpc_method_handler(
servicer.SendPacketIn,
request_deserializer=fibcapi__pb2.FFPacketIn.FromString,
response_serializer=fibcapis__pb2.FFPacketInReply.SerializeToString,
),
'SendOAMReply': grpc.unary_unary_rpc_method_handler(
servicer.SendOAMReply,
request_deserializer=fibcapis__pb2.OAMReply.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.VsMonitorRequest.FromString,
response_serializer=fibcapis__pb2.VsMonitorReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCVsApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class FIBCDpApiStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SendHello = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendHello',
request_serializer=fibcapi__pb2.FFHello.SerializeToString,
response_deserializer=fibcapis__pb2.FFHelloReply.FromString,
)
self.SendPacketIn = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendPacketIn',
request_serializer=fibcapi__pb2.FFPacketIn.SerializeToString,
response_deserializer=fibcapis__pb2.FFPacketInReply.FromString,
)
self.SendPortStatus = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendPortStatus',
request_serializer=fibcapi__pb2.FFPortStatus.SerializeToString,
response_deserializer=fibcapis__pb2.FFPortStatusReply.FromString,
)
self.SendL2AddrStatus = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendL2AddrStatus',
request_serializer=fibcapi__pb2.FFL2AddrStatus.SerializeToString,
response_deserializer=fibcapis__pb2.L2AddrStatusReply.FromString,
)
self.SendMultipartReply = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendMultipartReply',
request_serializer=fibcapis__pb2.DpMultipartReply.SerializeToString,
response_deserializer=fibcapis__pb2.DpMultipartReplyAck.FromString,
)
self.SendOAMReply = channel.unary_unary(
'/fibcapi.FIBCDpApi/SendOAMReply',
request_serializer=fibcapis__pb2.OAMReply.SerializeToString,
response_deserializer=fibcapis__pb2.OAMReplyAck.FromString,
)
self.Monitor = channel.unary_stream(
'/fibcapi.FIBCDpApi/Monitor',
request_serializer=fibcapis__pb2.DpMonitorRequest.SerializeToString,
response_deserializer=fibcapis__pb2.DpMonitorReply.FromString,
)
class FIBCDpApiServicer(object):
# missing associated documentation comment in .proto file
pass
def SendHello(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPacketIn(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendPortStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendL2AddrStatus(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendMultipartReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SendOAMReply(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Monitor(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FIBCDpApiServicer_to_server(servicer, server):
rpc_method_handlers = {
'SendHello': grpc.unary_unary_rpc_method_handler(
servicer.SendHello,
request_deserializer=fibcapi__pb2.FFHello.FromString,
response_serializer=fibcapis__pb2.FFHelloReply.SerializeToString,
),
'SendPacketIn': grpc.unary_unary_rpc_method_handler(
servicer.SendPacketIn,
request_deserializer=fibcapi__pb2.FFPacketIn.FromString,
response_serializer=fibcapis__pb2.FFPacketInReply.SerializeToString,
),
'SendPortStatus': grpc.unary_unary_rpc_method_handler(
servicer.SendPortStatus,
request_deserializer=fibcapi__pb2.FFPortStatus.FromString,
response_serializer=fibcapis__pb2.FFPortStatusReply.SerializeToString,
),
'SendL2AddrStatus': grpc.unary_unary_rpc_method_handler(
servicer.SendL2AddrStatus,
request_deserializer=fibcapi__pb2.FFL2AddrStatus.FromString,
response_serializer=fibcapis__pb2.L2AddrStatusReply.SerializeToString,
),
'SendMultipartReply': grpc.unary_unary_rpc_method_handler(
servicer.SendMultipartReply,
request_deserializer=fibcapis__pb2.DpMultipartReply.FromString,
response_serializer=fibcapis__pb2.DpMultipartReplyAck.SerializeToString,
),
'SendOAMReply': grpc.unary_unary_rpc_method_handler(
servicer.SendOAMReply,
request_deserializer=fibcapis__pb2.OAMReply.FromString,
response_serializer=fibcapis__pb2.OAMReplyAck.SerializeToString,
),
'Monitor': grpc.unary_stream_rpc_method_handler(
servicer.Monitor,
request_deserializer=fibcapis__pb2.DpMonitorRequest.FromString,
response_serializer=fibcapis__pb2.DpMonitorReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'fibcapi.FIBCDpApi', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
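# Illustrative wiring sketch (not part of the grpc-generated output above): the
# add_*Servicer_to_server helpers are normally attached to a grpc.server built
# around a thread pool. The worker count and port below are placeholder
# assumptions, not values taken from this project.
def _example_serve_dp_api(servicer, port=50051):
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_FIBCDpApiServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:{}'.format(port))
    server.start()
    return server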
|
dialogue-engine/test/programytest/config/file/test_json.py | cotobadesign/cotoba-agent-oss | 104 | 3030 | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
from programy.config.file.json_file import JSONConfigurationFile
from programy.clients.events.console.config import ConsoleConfiguration
from programy.utils.substitutions.substitues import Substitutions
from programytest.config.file.base_file_tests import ConfigurationBaseFileTests
class JSONConfigurationFileTests(ConfigurationBaseFileTests):
def test_get_methods(self):
config_data = JSONConfigurationFile()
self.assertIsNotNone(config_data)
configuration = config_data.load_from_text("""
{
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
section = config_data.get_section("brainx")
self.assertIsNone(section)
section = config_data.get_section("brain")
self.assertIsNotNone(section)
child_section = config_data.get_section("overrides", section)
self.assertIsNotNone(child_section)
keys = list(config_data.get_child_section_keys("overrides", section))
self.assertIsNotNone(keys)
self.assertEqual(3, len(keys))
self.assertTrue("allow_system_aiml" in keys)
self.assertTrue("allow_learn_aiml" in keys)
self.assertTrue("allow_learnf_aiml" in keys)
self.assertIsNone(config_data.get_child_section_keys("missing", section))
self.assertEqual(True, config_data.get_option(child_section, "allow_system_aiml"))
self.assertEqual(True, config_data.get_option(child_section, "missing", missing_value=True))
self.assertEqual(True, config_data.get_bool_option(child_section, "allow_system_aiml"))
self.assertEqual(False, config_data.get_bool_option(child_section, "other_value"))
self.assertEqual(0, config_data.get_int_option(child_section, "other_value"))
def test_load_from_file(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_file(os.path.dirname(__file__) + os.sep + "test_json.json", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assert_configuration(configuration)
def test_load_from_text_multis_one_value(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"bot": {
"brain": "bot1"
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assertEqual(1, len(configuration.client_configuration.configurations[0].configurations))
def test_load_from_text_multis_multiple_values(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"console": {
"bot": "bot"
},
"bot": {
"brain": ["bot1", "bot2"]
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assertEqual(2, len(configuration.client_configuration.configurations[0].configurations))
def test_load_from_text(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"console": {
"bot": "bot",
"prompt": ">>>",
"scheduler": {
"name": "Scheduler1",
"debug_level": 50,
"add_listeners": false,
"remove_all_jobs": false
},
"storage": {
"entities": {
"users": "sql",
"linked_accounts": "sql",
"links": "sql",
"properties": "file",
"conversations": "file",
"categories": "file",
"maps": "file",
"sets": "file",
"rdf": "file",
"denormal": "file",
"normal": "file",
"gender": "file",
"person": "file",
"person2": "file",
"spelling_corpus": "file",
"license_keys": "file",
"nodes": "file",
"binaries": "file",
"braintree": "file",
"preprocessors": "file",
"postprocessors": "file",
"regex_templates": "file",
"usergroups": "file",
"learnf": "file"
},
"stores": {
"sql": {
"type": "sql",
"config": {
"url": "sqlite:///:memory",
"echo": false,
"encoding": "utf-8",
"create_db": true,
"drop_all_first": true
}
},
"mongo": {
"type": "mongo",
"config": {
"url": "mongodb://localhost:27017/",
"database": "programy",
"drop_all_first": true
}
},
"redis": {
"type": "redis",
"config": {
"host": "localhost",
"port": 6379,
"password": <PASSWORD>,
"db": 0,
"prefix": "programy",
"drop_all_first": true
}
},
"file": {
"type": "file",
"config": {
"category_storage": {
"files": "./storage/categories"
},
"conversations_storage": {
"files": "./storage/conversations"
},
"sets_storage": {
"files": "./storage/sets",
"extension": ".txt",
"directories": false
},
"maps_storage": {
"files": "./storage/maps",
"extension": ".txt",
"directories": false
},
"regex_templates": {
"files": "./storage/regex"
},
"lookups_storage": {
"files": "./storage/lookups",
"extension": ".txt",
"directories": false
},
"properties_storage": {
"file": "./storage/properties.txt"
},
"defaults_storage": {
"file": "./storage/defaults.txt"
},
"rdf_storage": {
"files": "./storage/rdfs",
"extension": ".txt",
"directories": true
},
"spelling_corpus": {
"file": "./storage/spelling/corpus.txt"
},
"license_keys": {
"file": "./storage/license.keys"
},
"nodes": {
"files": "./storage/nodes"
},
"binaries": {
"files": "./storage/binaries"
},
"braintree": {
"file": "./storage/braintree/braintree.xml",
"format": "xml"
},
"preprocessors": {
"file": "./storage/processing/preprocessors.txt"
},
"postprocessors": {
"file": "./storage/processing/postprocessing.txt"
},
"usergroups": {
"files": "./storage/security/usergroups.txt"
},
"learnf": {
"files": "./storage/categories/learnf"
}
}
}
}
},
"logger": {
"type": "logger",
"config": {
"conversation_logger": "conversation"
}
}
},
"voice": {
"license_keys": "$BOT_ROOT/config/license.keys",
"tts": "osx",
"stt": "azhang",
"osx": {
"classname": "talky.clients.voice.tts.osxsay.OSXSayTextToSpeach"
},
"pytts": {
"classname": "talky.clients.voice.tts.pyttssay.PyTTSSayTextToSpeach",
"rate_adjust": 10
},
"azhang": {
"classname": "talky.clients.voice.stt.azhang.AnthonyZhangSpeechToText",
"ambient_adjust": 3,
"service": "ibm"
}
},
"rest": {
"host": "0.0.0.0",
"port": 8989,
"debug": false,
"workers": 4,
"license_keys": "$BOT_ROOT/config/license.keys"
},
"webchat": {
"host": "0.0.0.0",
"port": 8090,
"debug": false,
"license_keys": "$BOT_ROOT/config/license.keys",
"api": "/api/web/v1.0/ask"
},
"twitter": {
"polling": true,
"polling_interval": 49,
"streaming": false,
"use_status": true,
"use_direct_message": true,
"auto_follow": true,
"storage": "file",
"welcome_message": "Thanks for following me, send me a message and I'll try and help",
"license_keys": "file"
},
"xmpp": {
"server": "talk.google.com",
"port": 5222,
"xep_0030": true,
"xep_0004": true,
"xep_0060": true,
"xep_0199": true,
"license_keys": "file"
},
"socket": {
"host": "127.0.0.1",
"port": 9999,
"queue": 5,
"debug": true,
"license_keys": "file"
},
"telegram": {
"unknown_command": "Sorry, that is not a command I have been taught yet!",
"license_keys": "file"
},
"facebook": {
"host": "127.0.0.1",
"port": 5000,
"debug": false,
"license_keys": "file"
},
"twilio": {
"host": "127.0.0.1",
"port": 5000,
"debug": false,
"license_keys": "file"
},
"slack": {
"polling_interval": 1,
"license_keys": "file"
},
"viber": {
"name": "Servusai",
"avatar": "http://viber.com/avatar.jpg",
"license_keys": "file"
},
"line": {
"host": "127.0.0.1",
"port": 8084,
"debug": false,
"license_keys": "file"
},
"kik": {
"bot_name": "servusai",
"webhook": "https://93638f7a.ngrok.io/api/kik/v1.0/ask",
"host": "127.0.0.1",
"port": 8082,
"debug": false,
"license_keys": "file"
},
"bot": {
"brain": "brain",
"initial_question": "Hi, how can I help you today?",
"initial_question_srai": "YINITIALQUESTION",
"default_response": "Sorry, I don't have an answer for that!",
"default_response_srai": "YEMPTY",
"empty_string": "YEMPTY",
"exit_response": "So long, and thanks for the fish!",
"exit_response_srai": "YEXITRESPONSE",
"override_properties": true,
"max_question_recursion": 1000,
"max_question_timeout": 60,
"max_search_depth": 100,
"max_search_timeout": 60,
"spelling": {
"load": true,
"classname": "programy.spelling.norvig.NorvigSpellingChecker",
"check_before": true,
"check_and_retry": true
},
"conversations": {
"max_histories": 100,
"restore_last_topic": false,
"initial_topic": "TOPIC1",
"empty_on_start": false
}
},
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
},
"defaults": {
"default-get": "unknown",
"default-property": "unknown",
"default-map": "unknown",
"learnf-path": "file"
},
"binaries": {
"save_binary": true,
"load_binary": true,
"load_aiml_on_binary_fail": true
},
"braintree": {
"create": true
},
"services": {
"REST": {
"classname": "programy.services.rest.GenericRESTService",
"method": "GET",
"host": "0.0.0.0",
"port": 8080
},
"Pannous": {
"classname": "programy.services.pannous.PannousService",
"url": "http://weannie.pannous.com/api"
}
},
"security": {
"authentication": {
"classname": "programy.security.authenticate.passthrough.BasicPassThroughAuthenticationService",
"denied_srai": "AUTHENTICATION_FAILED"
},
"authorisation": {
"classname": "programy.security.authorise.usergroupsauthorisor.BasicUserGroupAuthorisationService",
"denied_srai": "AUTHORISATION_FAILED",
"usergroups": {
"storage": "file"
}
}
},
"oob": {
"default": {
"classname": "programy.oob.defaults.default.DefaultOutOfBandProcessor"
},
"alarm": {
"classname": "programy.oob.defaults.alarm.AlarmOutOfBandProcessor"
},
"camera": {
"classname": "programy.oob.defaults.camera.CameraOutOfBandProcessor"
},
"clear": {
"classname": "programy.oob.defaults.clear.ClearOutOfBandProcessor"
},
"dial": {
"classname": "programy.oob.defaults.dial.DialOutOfBandProcessor"
},
"dialog": {
"classname": "programy.oob.defaults.dialog.DialogOutOfBandProcessor"
},
"email": {
"classname": "programy.oob.defaults.email.EmailOutOfBandProcessor"
},
"geomap": {
"classname": "programy.oob.defaults.map.MapOutOfBandProcessor"
},
"schedule": {
"classname": "programy.oob.defaults.schedule.ScheduleOutOfBandProcessor"
},
"search": {
"classname": "programy.oob.defaults.search.SearchOutOfBandProcessor"
},
"sms": {
"classname": "programy.oob.defaults.sms.SMSOutOfBandProcessor"
},
"url": {
"classname": "programy.oob.defaults.url.URLOutOfBandProcessor"
},
"wifi": {
"classname": "programy.oob.defaults.wifi.WifiOutOfBandProcessor"
}
},
"dynamic": {
"variables": {
"gettime": "programy.dynamic.variables.datetime.GetTime"
},
"sets": {
"numeric": "programy.dynamic.sets.numeric.IsNumeric",
"roman": "programy.dynamic.sets.roman.IsRomanNumeral"
},
"maps": {
"romantodec": "programy.dynamic.maps.roman.MapRomanToDecimal",
"dectoroman": "programy.dynamic.maps.roman.MapDecimalToRoman"
}
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
self.assert_configuration(configuration)
def test_load_additionals(self):
config = JSONConfigurationFile()
self.assertIsNotNone(config)
configuration = config.load_from_text("""
{
"console": {
"bot": "bot"
},
"bot": {
"brain": "brain"
},
"brain": {
"security": {
"authentication": {
"classname": "programy.security.authenticate.passthrough.PassThroughAuthenticationService",
"denied_srai": "ACCESS_DENIED"
}
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
auth_service = configuration.client_configuration.configurations[0].configurations[0].security.authentication
self.assertIsNotNone(auth_service)
self.assertEqual("ACCESS_DENIED", auth_service.denied_srai)
def test_load_with_subs(self):
subs = Substitutions()
subs.add_substitute("$ALLOW_SYSTEM", True)
config_data = JSONConfigurationFile()
self.assertIsNotNone(config_data)
configuration = config_data.load_from_text("""
{
"brain": {
"overrides": {
"allow_system_aiml": true,
"allow_learn_aiml": true,
"allow_learnf_aiml": true
}
}
}
""", ConsoleConfiguration(), ".")
self.assertIsNotNone(configuration)
section = config_data.get_section("brainx")
self.assertIsNone(section)
section = config_data.get_section("brain")
self.assertIsNotNone(section)
child_section = config_data.get_section("overrides", section)
self.assertIsNotNone(child_section)
self.assertEqual(True, config_data.get_option(child_section, "allow_system_aiml"))
self.assertEqual(True, config_data.get_bool_option(child_section, "allow_system_aiml"))
self.assertEqual(False, config_data.get_bool_option(child_section, "other_value"))
|
tests/components/evil_genius_labs/test_light.py | liangleslie/core | 30,023 | 3036 | """Test Evil Genius Labs light."""
from unittest.mock import patch
import pytest
from homeassistant.components.light import (
ATTR_COLOR_MODE,
ATTR_SUPPORTED_COLOR_MODES,
ColorMode,
LightEntityFeature,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES
@pytest.mark.parametrize("platforms", [("light",)])
async def test_works(hass, setup_evil_genius_labs):
"""Test it works."""
state = hass.states.get("light.fibonacci256_23d4")
assert state is not None
assert state.state == "on"
assert state.attributes["brightness"] == 128
assert state.attributes[ATTR_COLOR_MODE] == ColorMode.RGB
assert state.attributes[ATTR_SUPPORTED_COLOR_MODES] == [ColorMode.RGB]
assert state.attributes[ATTR_SUPPORTED_FEATURES] == LightEntityFeature.EFFECT
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_on_color(hass, setup_evil_genius_labs):
"""Test turning on with a color."""
with patch(
"pyevilgenius.EvilGeniusDevice.set_path_value"
) as mock_set_path_value, patch(
"pyevilgenius.EvilGeniusDevice.set_rgb_color"
) as mock_set_rgb_color:
await hass.services.async_call(
"light",
"turn_on",
{
"entity_id": "light.fibonacci256_23d4",
"brightness": 100,
"rgb_color": (10, 20, 30),
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 2
    assert mock_set_path_value.mock_calls[0][1] == ("brightness", 100)
    assert mock_set_path_value.mock_calls[1][1] == ("power", 1)
assert len(mock_set_rgb_color.mock_calls) == 1
    assert mock_set_rgb_color.mock_calls[0][1] == (10, 20, 30)
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_on_effect(hass, setup_evil_genius_labs):
"""Test turning on with an effect."""
with patch("pyevilgenius.EvilGeniusDevice.set_path_value") as mock_set_path_value:
await hass.services.async_call(
"light",
"turn_on",
{
"entity_id": "light.fibonacci256_23d4",
"effect": "Pride Playground",
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 2
    assert mock_set_path_value.mock_calls[0][1] == ("pattern", 4)
    assert mock_set_path_value.mock_calls[1][1] == ("power", 1)
@pytest.mark.parametrize("platforms", [("light",)])
async def test_turn_off(hass, setup_evil_genius_labs):
"""Test turning off."""
with patch("pyevilgenius.EvilGeniusDevice.set_path_value") as mock_set_path_value:
await hass.services.async_call(
"light",
"turn_off",
{
"entity_id": "light.fibonacci256_23d4",
},
blocking=True,
)
assert len(mock_set_path_value.mock_calls) == 1
    assert mock_set_path_value.mock_calls[0][1] == ("power", 0)
|
post_office/validators.py | fasih/django-post_office | 661 | 3052 | from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_str
def validate_email_with_name(value):
"""
Validate email address.
Both "<NAME> <<EMAIL>>" and "<EMAIL>" are valid.
"""
value = force_str(value)
recipient = value
if '<' in value and '>' in value:
start = value.find('<') + 1
end = value.find('>')
if start < end:
recipient = value[start:end]
validate_email(recipient)
def validate_comma_separated_emails(value):
"""
Validate every email address in a comma separated list of emails.
"""
if not isinstance(value, (tuple, list)):
raise ValidationError('Email list must be a list/tuple.')
for email in value:
try:
validate_email_with_name(email)
except ValidationError:
raise ValidationError('Invalid email: %s' % email, code='invalid')
def validate_template_syntax(source):
"""
    Basic Django Template syntax validation. This allows for more robust
    template authoring.
"""
try:
Template(source)
except (TemplateSyntaxError, TemplateDoesNotExist) as err:
raise ValidationError(str(err))
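# Illustrative usage sketch (not part of the original module): each validator
# above raises django.core.exceptions.ValidationError on bad input and returns
# None on success. The sample addresses below are placeholders.
def _example_usage():
    validate_email_with_name('Jane Doe <jane@example.com>')  # name + address form
    validate_email_with_name('jane@example.com')             # bare address form
    try:
        validate_comma_separated_emails(['jane@example.com', 'not-an-email'])
    except ValidationError as err:
        print(err)  # "Invalid email: not-an-email"
    validate_template_syntax('Hello {{ name }}')  # parses as a Django template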
|
Convert Integer A to Integer B.py | RijuDasgupta9116/LintCode | 321 | 3072 | """
Determine the number of bits required to convert integer A to integer B
Example
Given n = 31, m = 14,return 2
(31)10=(11111)2
(14)10=(01110)2
"""
__author__ = 'Danyang'
class Solution:
def bitSwapRequired(self, a, b):
"""
:param a:
:param b:
:return: int
"""
a = self.to_bin(a)
b = self.to_bin(b)
diff = len(a)-len(b)
ret = 0
if diff<0:
a, b = b, a
diff *= -1
b = "0"*diff+b
for i in xrange(len(b)):
if a[i]!=b[i]:
ret += 1
return ret
def to_bin(self, n):
"""
2's complement
32-bit
:param n:
:return:
"""
"""
:param n:
:return:
"""
a = abs(n)
lst = []
while a>0:
lst.append(a%2)
a /= 2
# 2's complement
if n>=0:
lst.extend([0]*(32-len(lst)))
else:
pivot = -1
for i in xrange(len(lst)):
if pivot==-1 and lst[i]==1:
pivot = i
continue
if pivot!=-1:
lst[i] ^= 1
lst.extend([1]*(32-len(lst)))
return "".join(map(str, reversed(lst)))
if __name__=="__main__":
assert Solution().bitSwapRequired(1, -1)==31
assert Solution().bitSwapRequired(31, 14)==2
|
modules/dbnd/src/dbnd/_core/tracking/managers/callable_tracking.py | busunkim96/dbnd | 224 | 3084 | import contextlib
import logging
import typing
from typing import Any, Dict, Tuple
import attr
from dbnd._core.configuration import get_dbnd_project_config
from dbnd._core.constants import (
RESULT_PARAM,
DbndTargetOperationStatus,
DbndTargetOperationType,
TaskRunState,
)
from dbnd._core.current import (
current_task_run,
get_databand_run,
is_verbose,
try_get_current_task,
)
from dbnd._core.errors.errors_utils import log_exception
from dbnd._core.log.external_exception_logging import log_exception_to_server
from dbnd._core.parameter.parameter_definition import ParameterDefinition
from dbnd._core.parameter.parameter_value import ParameterFilters
from dbnd._core.settings import TrackingConfig
from dbnd._core.task.tracking_task import TrackingTask
from dbnd._core.task_build.task_context import try_get_current_task
from dbnd._core.task_build.task_definition import TaskDefinition
from dbnd._core.task_build.task_results import FuncResultParameter
from dbnd._core.task_run.task_run import TaskRun
from dbnd._core.task_run.task_run_error import TaskRunError
from dbnd._core.utils.callable_spec import args_to_kwargs
from dbnd._core.utils.timezone import utcnow
from targets import InMemoryTarget, Target
from targets.value_meta import ValueMetaConf
from targets.values import get_value_type_of_obj
if typing.TYPE_CHECKING:
from dbnd._core.task_build.task_decorator import TaskDecorator
logger = logging.getLogger(__name__)
@attr.s
class TrackedFuncCallWithResult(object):
call_args = attr.ib() # type: Tuple[Any]
call_kwargs = attr.ib() # type: Dict[str,Any]
callable = attr.ib()
result = attr.ib(default=None)
def set_result(self, value):
self.result = value
return value
def invoke(self):
func = self.callable
return func(*self.call_args, **self.call_kwargs)
class CallableTrackingManager(object):
def __init__(self, task_decorator):
# type: (CallableTrackingManager, TaskDecorator) -> None
self.task_decorator = task_decorator
self._tracking_task_definition = None
self._call_count = 0
self._call_as_func = False
self._max_call_count = get_dbnd_project_config().max_calls_per_run
@property
def callable(self):
return self.task_decorator.class_or_func
def get_tracking_task_definition(self):
if not self._tracking_task_definition:
self._tracking_task_definition = self._build_tracking_task_definition()
return self._tracking_task_definition
def _build_tracking_task_definition(self):
return TaskDefinition.from_task_decorator(task_decorator=self.task_decorator)
def _call_count_limit_exceeded(self):
if not self._call_as_func:
self._call_count += 1
if self._call_count > self._max_call_count:
logger.info(
"Reached maximum tracking limit of {} tasks. Running function regularly.".format(
self._max_call_count
)
)
self._call_as_func = True
return self._call_as_func
@contextlib.contextmanager
def tracking_context(self, call_args, call_kwargs):
user_code_called = False # whether we got to executing of user code
user_code_finished = False # whether we passed executing of user code
func_call = None
try:
# 1. check that we don't have too many calls
if self._call_count_limit_exceeded():
yield _do_nothing_decorator
return
# 2. Start or reuse existing "main tracking task" that is root for tracked tasks
if not try_get_current_task():
"""
try to get existing task, and if not exists - try to get/create inplace_task_run
"""
from dbnd._core.tracking.script_tracking_manager import (
try_get_inplace_tracking_task_run,
)
                inplace_tracking_task = try_get_inplace_tracking_task_run()
                if not inplace_tracking_task:
# we didn't manage to start inplace tracking task run, we will not be able to track
yield _do_nothing_decorator
return
tracking_task_definition = self.get_tracking_task_definition()
callable_spec = tracking_task_definition.task_decorator.get_callable_spec()
func_call = TrackedFuncCallWithResult(
callable=self.callable,
call_args=tuple(call_args), # prevent original call_args modification
call_kwargs=dict(call_kwargs), # prevent original kwargs modification
)
            # replace any positional argument with a kwarg where possible
args, kwargs = args_to_kwargs(
callable_spec.args, func_call.call_args, func_call.call_kwargs,
)
# instantiate inline task
task = TrackingTask.for_func(tracking_task_definition, args, kwargs)
# update upstream/downstream relations - needed for correct tracking
# we can have the task as upstream , as it was executed already
parent_task = current_task_run().task
if not parent_task.task_dag.has_upstream(task):
parent_task.set_upstream(task)
# checking if any of the inputs are the outputs of previous task.
# we can add that task as upstream.
dbnd_run = get_databand_run()
call_kwargs_as_targets = dbnd_run.target_origin.get_for_map(kwargs)
for value_origin in call_kwargs_as_targets.values():
up_task = value_origin.origin_target.task
task.set_upstream(up_task)
# creating task_run as a task we found mid-run
task_run = dbnd_run.create_task_run_at_execution_time(
task, task_engine=current_task_run().task_engine
)
should_capture_log = TrackingConfig.current().capture_tracking_log
with task_run.runner.task_run_execution_context(
handle_sigterm=True, capture_log=should_capture_log
):
task_run.set_task_run_state(state=TaskRunState.RUNNING)
_log_inputs(task_run)
# if we reached this line, then all tracking initialization is
# finished successfully, and we're going to execute user code
user_code_called = True
try:
# tracking_context is context manager - user code will run on yield
yield func_call.set_result
# if we reached this line, this means that user code finished
# successfully without any exceptions
user_code_finished = True
except Exception as ex:
task_run.finished_time = utcnow()
error = TaskRunError.build_from_ex(ex, task_run)
task_run.set_task_run_state(TaskRunState.FAILED, error=error)
raise
else:
task_run.finished_time = utcnow()
# func_call.result should contain result, log it
_log_result(task_run, func_call.result)
task_run.set_task_run_state(TaskRunState.SUCCESS)
except Exception:
if user_code_called and not user_code_finished:
# if we started to call the user code and not got to user_code_finished
# line - it means there was user code exception - so just re-raise it
raise
            # else it's either that we didn't reach calling user code, or we already passed it
# then it's some dbnd tracking error - just log it
if func_call:
_handle_tracking_error("tracking-init", func_call)
else:
log_exception_to_server()
            # if we didn't reach the user_code_called=True line - there was an error during
# dbnd tracking initialization, so nothing is done - user function wasn't called yet
if not user_code_called:
# tracking_context is context manager - user code will run on yield
yield _do_nothing_decorator
return
def _handle_tracking_error(msg, func_call=None):
log_exception_to_server()
location = " for %s" % func_call.callable if func_call else ""
msg = "Failed during dbnd %s for %s, ignoring, and continue without tracking" % (
msg,
location,
)
if is_verbose():
logger.warning(
msg, exc_info=True,
)
else:
logger.info(msg)
def _do_nothing_decorator(f):
return f
def _log_inputs(task_run):
"""
For tracking mode. Logs InMemoryTarget inputs.
"""
try:
params = task_run.task._params
for param_value in params.get_param_values(ParameterFilters.INPUTS):
param, value = param_value.parameter, param_value.value
if isinstance(param_value, InMemoryTarget):
try:
param = param.modify(
value_meta_conf=ValueMetaConf(
log_preview=True, log_schema=True,
)
)
task_run.tracker.log_parameter_data(
parameter=param,
target=param_value,
value=value,
operation_type=DbndTargetOperationType.read,
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log input param to tracking store.",
ex=ex,
non_critical=True,
)
except Exception as ex:
log_exception(
"Failed to log input params to tracking store.", ex=ex, non_critical=True
)
def _log_result(task_run, result):
# type: (TaskRun, Any) -> None
"""
For tracking mode. Logs the task result and adds it to the target_origin map to support relationships between
dynamic tasks.
"""
try:
result_param = task_run.task.task_params.get_param_value(RESULT_PARAM)
if not result_param:
logger.debug(
"No result params to log for task {}".format(task_run.task_af_id)
)
return
        # we know the parameter value is a target because this is an output param
        # the target is created during task creation
result_param_def, result_target = result_param.parameter, result_param.value
# spread result into relevant fields.
if isinstance(result_param_def, FuncResultParameter):
# assign all returned values to relevant band Outputs
if result is None:
return
for result_name, value in result_param_def.named_results(result):
                # we know the parameter value is a target because this is an output param
                # the target is created during task creation
parameter_value = task_run.task.task_params.get_param_value(result_name)
_log_parameter_value(
task_run,
parameter_definition=parameter_value.parameter,
target=parameter_value.value,
value=value,
)
else:
_log_parameter_value(
task_run,
parameter_definition=result_param_def,
target=result_target,
value=result,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
def _log_parameter_value(task_run, parameter_definition, target, value):
# type: (TaskRun, ParameterDefinition, Target, Any) -> None
# make sure it will be logged correctly
parameter_definition = parameter_definition.modify(
value_meta_conf=ValueMetaConf(log_preview=True, log_schema=True)
)
try:
# case what if result is Proxy
value_type = get_value_type_of_obj(value, parameter_definition.value_type)
task_run.run.target_origin.add(target, value, value_type)
except Exception as ex:
log_exception(
"Failed to register result to target tracking.", ex=ex, non_critical=True
)
try:
task_run.tracker.log_parameter_data(
parameter=parameter_definition, # was: task_run.task.task_definition.task_class.result,
target=target,
value=value,
operation_type=DbndTargetOperationType.write, # is it write? (or log?)
operation_status=DbndTargetOperationStatus.OK,
)
except Exception as ex:
log_exception(
"Failed to log result to tracking store.", ex=ex, non_critical=True
)
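# Illustrative sketch (not part of the original module) of how the tracking
# context above is consumed: dbnd's task decorator enters tracking_context()
# around the user callable and pushes the real return value through the yielded
# set_result callback. The `task_decorator` argument below is a placeholder.
def _example_tracked_call(task_decorator, user_func, *args, **kwargs):
    manager = CallableTrackingManager(task_decorator=task_decorator)
    with manager.tracking_context(args, kwargs) as track_result:
        return track_result(user_func(*args, **kwargs))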
|
tests/test_bindiff.py | Kyle-Kyle/angr | 6,132 | 3087 | import nose
import angr
import logging
l = logging.getLogger("angr.tests.test_bindiff")
import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests')
# todo make a better test
def test_bindiff_x86_64():
binary_path_1 = os.path.join(test_location, 'x86_64', 'bindiff_a')
binary_path_2 = os.path.join(test_location, 'x86_64', 'bindiff_b')
b = angr.Project(binary_path_1, load_options={"auto_load_libs": False})
b2 = angr.Project(binary_path_2, load_options={"auto_load_libs": False})
bindiff = b.analyses.BinDiff(b2)
identical_functions = bindiff.identical_functions
differing_functions = bindiff.differing_functions
unmatched_functions = bindiff.unmatched_functions
# check identical functions
nose.tools.assert_in((0x40064c, 0x40066a), identical_functions)
# check differing functions
nose.tools.assert_in((0x400616, 0x400616), differing_functions)
# check unmatched functions
nose.tools.assert_less_equal(len(unmatched_functions[0]), 1)
nose.tools.assert_less_equal(len(unmatched_functions[1]), 2)
# check for no major regressions
nose.tools.assert_greater(len(identical_functions), len(differing_functions))
nose.tools.assert_less(len(differing_functions), 4)
# check a function diff
fdiff = bindiff.get_function_diff(0x400616, 0x400616)
block_matches = { (a.addr, b.addr) for a, b in fdiff.block_matches }
nose.tools.assert_in((0x40064a, 0x400668), block_matches)
nose.tools.assert_in((0x400616, 0x400616), block_matches)
nose.tools.assert_in((0x40061e, 0x40061e), block_matches)
def run_all():
functions = globals()
all_functions = dict(filter((lambda kv: kv[0].startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angr.analyses.bindiff").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
2018/finals/pwn-gdb-as-a-service/web_challenge/challenge/gaas.py | iicarus-bit/google-ctf | 2,757 | 3089 | #!/usr/bin/env python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from aiohttp import web
import capstone
import functools
from gdbproc import GDBProcess
import socketio
import asyncio
import codecs
import os
enable_logging = False
premium = 'PREMIUM' in os.environ
if premium:
access_key = os.getenv('PREMIUM_KEY')
runnable = ['/home/user/printwebflag']
else:
access_key = os.getenv('TRIAL_KEY')
runnable = ['/bin/sleep', '20']
MAX_INSN_LEN = 15
capstone_md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
sio = socketio.AsyncServer()
app = web.Application()
sio.attach(app)
with open('index.html') as f:
index_html = f.read()
async def index(request):
if not 'key' in request.cookies:
return web.Response(status=401, text='permission denied (missing key)', content_type='text/html')
if request.cookies['key'] != access_key:
return web.Response(status=401, text='permission denied (invalid key)', content_type='text/html')
return web.Response(text=index_html, content_type='text/html')
app.add_routes([web.get('/', index),
web.get('/{name}', index)])
gdb_sessions = {}
stop_queue_readers = {}
async def on_shutdown(app):
await asyncio.gather(delete_gdb_process(sid) for sid in gdb_sessions.keys())
app.on_shutdown.append(on_shutdown)
def log(msg):
if enable_logging:
print('[*] {}'.format(msg))
@sio.on('connect')
def connect(sid, environ):
log('connected {}'.format(sid))
if not 'key={}'.format(access_key) in environ['HTTP_COOKIE']:
log('access_key not found {}'.format(environ['HTTP_COOKIE']))
return False
@sio.on('disconnect')
async def disconnect(sid):
log('disconnected {}'.format(sid))
await delete_gdb_process(sid)
async def stop_queue_reader(sid, queue):
while True:
pkt = await queue.get()
await update_all(sid)
async def create_gdb_process(sid):
stop_queue = asyncio.Queue()
gdb_sessions[sid] = await GDBProcess.create(runnable, stop_queue, env={'KEY': access_key}, log_fn=log)
loop = asyncio.get_event_loop()
stop_queue_readers[sid] = loop.create_task(stop_queue_reader(sid, stop_queue))
async def delete_gdb_process(sid):
if sid in gdb_sessions:
stop_queue_readers[sid].cancel()
del stop_queue_readers[sid]
await gdb_sessions[sid].release()
del gdb_sessions[sid]
@sio.on('start')
async def start(sid):
await delete_gdb_process(sid)
await create_gdb_process(sid)
# Reading registers doesn't work on ubuntu 18.04 for some reason.
# Step once as a work around
step(sid)
async def update_all(sid):
log('updating sid {}'.format(sid))
regs_task = getregs(sid)
maps_task = getmaps(sid)
asm_task = getasm(sid, {'addr': await gdb_sessions[sid].get_reg('rip'), 'count': 100})
await asyncio.gather(regs_task, maps_task, asm_task)
log('update done')
@sio.on('step')
def step(sid):
gdb_sessions[sid].step()
@sio.on('cont')
def cont(sid):
gdb_sessions[sid].cont()
@sio.on('stop')
def stop(sid):
gdb_sessions[sid].interrupt()
async def getregs(sid):
regs = await gdb_sessions[sid].get_regs()
await sio.emit('regs', regs, room=sid)
@sio.on('mem')
async def getmem(sid, msg):
addr = msg['addr']
count = msg['count']
data = gdb_sessions[sid].read_mem(addr, count)
await sio.emit('mem', {'addr': addr, 'data': data}, room=sid)
async def getmaps(sid):
maps = gdb_sessions[sid].maps()
await sio.emit('maps', maps, room=sid)
@sio.on('break')
async def setbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].set_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('unbreak')
async def rmbreakpoint(sid, data):
addr = data['addr']
await gdb_sessions[sid].remove_breakpoint(addr)
await sio.emit('breakpoints', gdb_sessions[sid].breakpoints(), room=sid)
@sio.on('search')
async def search(sid, data):
q = data['q']
qtype = data['type']
await sio.emit('search_result', gdb_sessions[sid].search(q.encode(), qtype), room=sid)
async def getasm(sid, data):
addr = data['addr']
count = data['count']
result = []
for _ in range(count):
data = gdb_sessions[sid].read_mem(addr, MAX_INSN_LEN)
try:
disasm = next(capstone_md.disasm_lite(data, addr))
except StopIteration:
break
result.append(disasm)
addr += disasm[1]
await sio.emit('asm', result, room=sid)
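# Illustrative client sketch (not part of the challenge server): driving the
# events above with python-socketio. The URL, port and key value are
# placeholders; the cookie must match the access_key checked in connect().
def _example_client(key, url='http://localhost:8080'):
    client = socketio.Client()
    client.on('regs', lambda regs: print('regs:', regs))
    client.on('asm', lambda asm: print('asm:', asm))
    client.connect(url, headers={'Cookie': 'key={}'.format(key)})
    client.emit('start')
    client.emit('step')
    client.wait()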
if __name__ == '__main__':
web.run_app(app)
|
contrib/functional_tests/functional/test_reorg.py | electrumsv/electrumsv | 136 | 3113 | """
Warning - this will reset all components back to a blank state before running the simulation
Runs node1, electrumx1 and electrumsv1 and loads the default wallet on the daemon (so that newly
submitted blocks will be synchronized by ElectrumSV).
reorged txid: 'a1fa9460ca105c1396cd338f7fa202bf79a9d244d730e91e19f6302a05b2f07a'
"""
import asyncio
import os
from pathlib import Path
import pytest
import pytest_asyncio
from electrumsv_node import electrumsv_node
from electrumsv_sdk import utils
import logging
import requests
from contrib.functional_tests.websocket_client import TxStateWSClient
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("simulate-fresh-reorg")
async def wait_for_reorg_transaction_update(reorged_txids, reorg_height):
MAX_WAIT_TIME = 10 # seconds
async with TxStateWSClient() as ws_client:
try:
await asyncio.wait_for(ws_client.block_until_confirmed_and_height_updated(
reorged_txids, reorg_height), MAX_WAIT_TIME)
except asyncio.TimeoutError:
logger.exception(f"timed out after {MAX_WAIT_TIME} seconds")
raise
class TestReorg:
@classmethod
def setup_class(cls):
pass
@classmethod
def teardown_class(cls):
pass
@pytest.mark.asyncio
def test_reorg(self, event_loop):
async def test_reorg():
payload = {
"password": "<PASSWORD>"
}
REORGED_TXIDS = "a1fa9460ca105c1396cd338f7fa202bf79a9d244d730e91e19f6302a05b2f07a"
# Load the default wallet on ElectrumSV daemon
url = f"http://127.0.0.1:9999/v1/regtest/dapp/wallets/worker1.sqlite/load_wallet"
result = requests.post(url, json=payload)
result.raise_for_status()
# Submit node1 blocks to node
if electrumsv_node.is_node_running():
utils.submit_blocks_from_file(node_id='node1',
filepath=Path(MODULE_DIR).joinpath('../reorg_blocks/node1_blocks.dat'))
else:
logger.exception("node unavailable")
try:
                await wait_for_reorg_transaction_update([REORGED_TXIDS], 201)
# Todo check state of get_balance; get_coin_state; get_transaction_history
# Submit node2 blocks to node
if electrumsv_node.is_node_running():
utils.submit_blocks_from_file(node_id='node1',
filepath=Path(MODULE_DIR).joinpath('../reorg_blocks/node2_blocks.dat'))
else:
logger.exception("node unavailable")
                await wait_for_reorg_transaction_update([REORGED_TXIDS], 202)
except asyncio.TimeoutError:
pytest.xfail("work in progress alongside refactoring changes...")
# Todo check state of get_balance; get_coin_state; get_transaction_history
event_loop.run_until_complete(test_reorg())
|
torch/_fx/graph_module.py | jsun94/nimble | 206 | 3134 | import torch
import torch.overrides
import linecache
from typing import Type, Dict, List, Any, Union
from .graph import Graph
import copy
# normal exec loses the source code, however we can patch
# the linecache module to still recover it.
# using exec_with_source will add it to our local cache
# and then tools like TorchScript will be able to get source info.
_next_id = 0
def exec_with_source(src: str, globals: Dict[str, Any]):
global _next_id
key = f'<eval_with_key_{_next_id}>'
_next_id += 1
_eval_cache[key] = [line + '\n' for line in src.splitlines()]
exec(compile(src, key, 'exec'), globals)
# patch linecache so that any code we exec using exec_with_source
# works with inspect
_eval_cache : Dict[str, List[str]] = {}
_orig_getlines = linecache.getlines
def patched_getline(*args, **kwargs):
if args[0] in _eval_cache:
return _eval_cache[args[0]]
return _orig_getlines(*args, **kwargs)
linecache.getlines = patched_getline
def _forward_from_src(src : str):
gbls: Dict[str, Any] = {
'torch': torch
}
exec_with_source(src, gbls)
return gbls['forward']
def deserialize_graphmodule(body : dict) -> torch.nn.Module:
"""
Deserialize a GraphModule given the dictionary of the original module,
using the code to reconstruct the graph. We delete the actual graph before
saving the dictionary so that changes to the in-memory graph format do not
get serialized.
"""
# We create a dummy class here because symbolic_trace pulls the forward()
# function off of the class, rather than the instance
class CodeOnlyModule(torch.nn.Module):
def __init__(self, body):
super().__init__()
self.__dict__ = body
CodeOnlyModule.forward = _forward_from_src(body['code'])
from .symbolic_trace import Tracer
    # we shouldn't trace into any of the submodules,
    # because they were not traced in the original GraphModule
class KeepModules(Tracer):
def is_leaf_module(self, _: torch.nn.Module, __: str) -> bool:
return True
return KeepModules().trace(CodeOnlyModule(body))
# copy an attribute value with qualified name 'target' from 'from_module' to 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _copy_attr(from_module: torch.nn.Module, to_module: torch.nn.Module, target: str):
*prefix, field = target.split('.')
for item in prefix:
f = getattr(from_module, item)
t = getattr(to_module, item, None)
if f is t:
# we have already installed one of its parents
# (e.g. target = root.linear.weight, but we have already installed root.linear)
# once we install a parent, we no longer need to copy the children
# since all the needed properties will already be present
return
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
from_module, to_module = f, t
setattr(to_module, field, getattr(from_module, field))
# Assign attribute 'from_obj' to the qualified name 'target' on 'to_module'
# This installs empty Modules where none exist yet if they are subpaths of target
def _assign_attr(from_obj: Any, to_module: torch.nn.Module, target: str):
*prefix, field = target.split('.')
for item in prefix:
t = getattr(to_module, item, None)
if t is None:
t = torch.nn.Module()
setattr(to_module, item, t)
to_module = t
setattr(to_module, field, from_obj)
class GraphModule(torch.nn.Module):
"""
GraphModule is an nn.Module generated from an fx.Graph. GraphModule has
important attributes:
graph : The graph from which this GraphModule was generated
code : The Python source code for the function generated from `graph`
forward : The Python method generated from `graph`
Note that when `graph` is reassigned, `code` and `forward` will be automatically
regenerated.
"""
def __new__(cls: 'Type[GraphModule]', *args, **kwargs):
# each instance of a graph module needs its own forward method
# so create a new singleton class for each instance.
# it is a subclass of the user-defined class, the only difference
# is an extra layer to install the forward method
class GraphModuleImpl(cls): # type: ignore
pass
return super().__new__(GraphModuleImpl)
def __init__(self, root: Union[torch.nn.Module, Dict[str, Any]], graph: Graph):
"""
Construct a GraphModule.
root - `root` can either be an nn.Module instance or a Dict mapping strings to any attribute type.
- In the case that `root` is a Module, any references to Module-based objects (via qualified
name) in the Graph's Nodes' `target` field will be copied over from the respective place
within `root`'s Module hierarchy into the GraphModule's module hierarchy.
- In the case that `root` is a dict, the qualified name found in a Node's `target` will be
looked up directly in the dict's keys. The object mapped to by the Dict will be copied
over into the appropriate place within the GraphModule's module hierarchy.
graph - `graph` contains the nodes this GraphModule should use for code generation
"""
super().__init__()
if isinstance(root, torch.nn.Module):
if hasattr(root, 'training'):
self.training = root.training
for node in graph.nodes:
if node.op in ['get_attr', 'call_module']:
assert isinstance(node.target, str)
_copy_attr(root, self, node.target)
elif isinstance(root, dict):
targets_to_copy = []
for node in graph.nodes:
if node.op in ['get_attr', 'call_module']:
assert isinstance(node.target, str)
if node.target not in root:
raise RuntimeError('Node ' + str(node) + ' referenced target ' + node.target +
' but that target was not provided in `root`!')
targets_to_copy.append(node.target)
# Sort targets in ascending order of the # of atoms.
# This will ensure that less deeply nested attributes are assigned
# before more deeply nested attributes. For example, foo.bar
# will be assigned before foo.bar.baz. Otherwise, we might assign
# the user-provided `foo.bar` and wipe out the previously-assigned
# `foo.bar.baz`
targets_to_copy.sort(key=lambda t: t.count('.'))
for target_to_copy in targets_to_copy:
_assign_attr(root[target_to_copy], self, target_to_copy)
else:
raise RuntimeError('Unsupported type ' + str(root) + ' passed for root!')
self.graph = graph
# TorchScript breaks trying to compile the graph setter because of the
# continued string literal. Issue here: https://github.com/pytorch/pytorch/issues/44842
#
# Shouldn't be an issue since these methods shouldn't be used in TorchScript anyway
__jit_unused_properties__ = ['graph']
@property
def graph(self):
return self._graph
@graph.setter
def graph(self, val) -> None:
self._graph = val
body, result, free_variables = self._graph.python_code(root_module='self')
body = '\n'.join(' ' + line for line in body.split('\n')) + '\n'
self.code = f"""\
def forward(self, {', '.join(free_variables)}):
{body}
return {result}
"""
cls = type(self)
cls.forward = _forward_from_src(self.code)
def __reduce__(self):
dict_without_graph = self.__dict__.copy()
del dict_without_graph['_graph']
return (deserialize_graphmodule, (dict_without_graph,))
# because __reduce__ is defined for serialization,
# we need to define deepcopy otherwise it will call __reduce__
# and cause symbolic tracing to occur every time we try to copy the object
def __deepcopy__(self, memo):
fake_mod = torch.nn.Module()
fake_mod.__dict__ = copy.deepcopy(self.__dict__)
return GraphModule(fake_mod, self.graph)
def __copy__(self):
return GraphModule(self, self.graph)
def __str__(self) -> str:
orig_str = super().__str__()
return '\n'.join([orig_str, self.code])
# workarounds for issues in __torch_function__
# WAR for __torch_function__ not handling tensor lists,
# fix is in https://github.com/pytorch/pytorch/pull/34725
# orig_cat = torch.cat
# def patched_cat(*args, **kwargs):
# tensors = args[0]
# for t in tensors:
# if isinstance(t, Proxy):
# return t.__torch_function__(patched_cat, (), args, kwargs)
# return orig_cat(*args, **kwargs)
# patched_cat.__module__ = 'torch'
# patched_cat.__name__ = 'cat'
# torch.cat = patched_cat
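# Illustrative usage sketch (not part of the original module): a GraphModule is
# normally produced by symbolic tracing and regenerates its forward() from the
# stored Graph. This assumes, as deserialize_graphmodule() above does, that
# Tracer.trace() returns a GraphModule whose .graph can be reused.
def _example_graph_module():
    from .symbolic_trace import Tracer
    class TwoLayer(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)
        def forward(self, x):
            return torch.relu(self.linear(x))
    traced = Tracer().trace(TwoLayer())              # GraphModule with .graph / .code
    rebuilt = GraphModule(TwoLayer(), traced.graph)  # rebuild from an nn.Module root
    return rebuilt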
|
tests/utils/test_metrics.py | haochuanwei/hover | 251 | 3136 | from hover.utils.metrics import classification_accuracy
import numpy as np
def test_classification_accuracy():
true = np.array([1, 2, 3, 4, 5, 6, 7, 7])
pred = np.array([1, 2, 3, 4, 5, 6, 7, 8])
accl = classification_accuracy(true, pred)
accr = classification_accuracy(pred, true)
assert np.allclose(accl, 7/8)
assert np.allclose(accr, 7/8)
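# Illustrative cross-check (an assumption, not taken from the hover source):
# the accuracy asserted above should equal the fraction of positions where the
# two arrays agree, i.e. np.mean(true == pred).
def _reference_accuracy(true, pred):
    return float(np.mean(np.asarray(true) == np.asarray(pred)))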
|
test/tests/bootstrap/test_api20_windows_bootstrap.py | arunrordell/RackHD | 451 | 3141 | '''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
This script tests an arbitrary payload of the RackHD API 2.0 OS bootstrap workflows.
The default case is running a minimum payload Windows OS install.
Other Windows-type OS install cases can be specified by creating a payload file and specifying it using the '-extra' argument.
This test takes 30-45 minutes to run.
Example payload file (installed in configuration dir):
{"bootstrap-payload":
{"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "<KEY>",
"username": "rackhduser",
"password": "<PASSWORD>",
"smbUser": "vagrant",
"smbPassword": "<PASSWORD>"}}}
}
Example command line using external payload file:
python run_tests.py -stack 4 -test tests/bootstrap/test_api20_windows_bootstrap.py -extra base_windows_2012_install.json
RackHD Windows installation workflow requires special configuration of the RackHD server:
- A customized WinPE environment installed on RackHD server as documented here:
https://github.com/RackHD/on-tools/tree/master/winpe
- Samba installed on the RackHD server and configured as documented here:
http://rackhd.readthedocs.io/en/latest/rackhd/install_os.html?highlight=os%20install
- Windows 2012 installation distro installed on RackHD server or equivalent NFS mount.
- Windows 2012 activation key in the installation payload file.
'''
import fit_path # NOQA: unused import
from nose.plugins.attrib import attr
import fit_common
import flogging
import random
import json
import time
from nosedep import depends
from datetime import datetime
log = flogging.get_loggers()
# sample default base payload
PAYLOAD = {"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "<KEY>",
"username": "rackhduser",
"password": "<PASSWORD>",
"smbUser": "vagrant",
"smbPassword": "<PASSWORD>"}}}
# if an external payload file is specified, use that
config = fit_common.fitcfg().get('bootstrap-payload', None)
if config:
PAYLOAD = config
# function to return the value of a field from the workflow response
def findall(obj, key):
if isinstance(obj, dict):
for k, v in obj.items():
if k == key:
log.error(" workflow error: %s", v)
findall(v, key)
elif isinstance(obj, list):
for item in obj:
findall(item, key)
else:
pass
# this routine polls a workflow task ID for completion
def wait_for_workflow_complete(instanceid, start_time, waittime=3200, cycle=30):
log.info_1(" Workflow started at time: " + str(datetime.fromtimestamp(start_time)))
while time.time() - start_time < waittime: # limit test to waittime seconds
result = fit_common.rackhdapi("/api/2.0/workflows/" + instanceid)
if result['status'] != 200:
log.error(" HTTP error: " + result['text'])
return False
if result['json']['status'] in ['running', 'pending']:
log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
fit_common.time.sleep(cycle)
elif result['json']['status'] == 'succeeded':
log.info_1("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
end_time = time.time()
log.info_1(" Workflow completed at time: " + str(datetime.fromtimestamp(end_time)))
log.info_1(" Workflow duration: " + str(end_time - start_time))
return True
else:
end_time = time.time()
log.info_1(" Workflow failed at time: " + str(datetime.fromtimestamp(end_time)))
log.info_1(" Workflow duration: " + str(end_time - start_time))
try:
res = json.loads(result['text'])
findall(res, "error")
except:
res = result['text']
log.error(" Workflow failed: status: %s", result['json']['status'])
log.error(" Data: %s", json.dumps(res, indent=4, separators=(',', ':')))
return False
try:
res = json.loads(result['text'])
except:
res = result['text']
log.error(" Workflow Timeout: " + json.dumps(res, indent=4, separators=(',', ':')))
return False
# ------------------------ Tests -------------------------------------
@attr(all=False)
class api20_bootstrap_windows(fit_common.unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get the list of nodes
NODECATALOG = fit_common.node_select()
assert (len(NODECATALOG) != 0), "There are no nodes currently discovered"
# Select one node at random
cls.__NODE = NODECATALOG[random.randint(0, len(NODECATALOG) - 1)]
# Print node Id, node BMC mac ,node type
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + cls.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
monurl = "/api/2.0/nodes/" + cls.__NODE + "/catalogs/bmc"
mondata = fit_common.rackhdapi(monurl, action="get")
catalog = mondata['json']
bmcresult = mondata['status']
if bmcresult != 200:
log.info_1(" Node ID: " + cls.__NODE)
log.info_1(" Error on catalog/bmc command")
else:
log.info_1(" Node ID: " + cls.__NODE)
log.info_1(" Node SKU: " + nodesku)
log.info_1(" Node BMC Mac: %s", catalog.get('data')['MAC Address'])
log.info_1(" Node BMC IP Addr: %s", catalog.get('data')['IP Address'])
log.info_1(" Node BMC IP Addr Src: %s", catalog.get('data')['IP Address Source'])
# delete active workflows for specified node
result = fit_common.cancel_active_workflows(cls.__NODE)
assert (result is True), "There are still some active workflows running against the node"
def test01_node_check(self):
# Log node data
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
log.info_1(" Node ID: %s ", self.__class__.__NODE)
log.info_1(" Node SKU: %s ", nodesku)
log.info_1(" Graph Name: Graph.PowerOn.Node")
# Ensure the compute node is powered on and reachable
result = fit_common.rackhdapi('/api/2.0/nodes/' +
self.__class__.__NODE +
'/workflows',
action='post', payload={"name": "Graph.PowerOn.Node"})
self.assertEqual(result['status'], 201, "Node Power on workflow API failed, see logs.")
self.assertTrue(wait_for_workflow_complete(result['json']['instanceId'], time.time(), 50, 5),
"Node Power on workflow failed, see logs.")
@depends(after=test01_node_check)
def test02_os_install(self):
# Log node data
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
log.info_1(" Node ID: " + self.__class__.__NODE)
log.info_1(" Node SKU: " + nodesku)
log.info_1(" Graph Name: Graph.InstallWindowsServer")
log.info_1(" Payload: " + fit_common.json.dumps(PAYLOAD))
# launch workflow
workflowid = None
result = fit_common.rackhdapi('/api/2.0/nodes/' +
self.__class__.__NODE +
'/workflows',
action='post', payload=PAYLOAD)
if result['status'] == 201:
# workflow running
log.info_1(" InstanceID: " + result['json']['instanceId'])
workflowid = result['json']['instanceId']
else:
# workflow failed with response code
log.error(" InstanceID: " + result['text'])
self.fail("Workflow failed with response code: " + result['status'])
self.assertTrue(wait_for_workflow_complete(workflowid, time.time()), "OS Install workflow failed, see logs.")
if __name__ == '__main__':
fit_common.unittest.main()
|
tensorflow/tools/quantization/quantize_graph_test.py | tianyapiaozi/tensorflow | 374 | 3148 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
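  """Imports graph_def into a fresh Graph and evaluates the requested outputs, feeding input_map through the session."""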
graph = ops_lib.Graph()
with graph.as_default():
importer.import_graph_def(graph_def, input_map={}, name="")
with session.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=dtypes.float32,
shape=[image_batch_count, image_height, image_width, depth])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=dtypes.float32,
shape=[filter_size, filter_size, depth, filter_count])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node(
"Conv2D", conv_name, [input_constant_name, filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
  on failure, paying special attention to possible biases by looking at the mean
  error and the mean absolute error.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
tf_logging.info("Tensors have {0} different values ({1}%), with mean"
" difference {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100,
mean_difference, mean_abs_difference))
return False
def get_top_value(input_values):
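  """Returns the (index, value) of the largest element in the flattened input."""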
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
    if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(
float_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(
eightbit_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
if log_graph:
tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded", quantized_input_range=None)
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(
weights_rounded_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
    # These tests are added to test the degenerate case where
# min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_reshape(self):
"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
def make_matmul(name, a, b):
n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
quantize_graph.set_attr_bool(n, "transpose_a", False)
quantize_graph.set_attr_bool(n, "transpose_b", False)
return n
# matmul_1 = input*weight_1
input_node = quantize_graph.create_constant_node(
"input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
weight_1_node = quantize_graph.create_constant_node(
"weight_1",
value=[.5, .6, .7, .8, .9],
dtype=dtypes.float32,
shape=[1, 5])
matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
# Reshape 4x5 to 10x2.
new_shape_node = quantize_graph.create_constant_node(
"new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
reshape_node = quantize_graph.create_node(
"Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)
# matmul_2_node = reshape*weight_2
weight_2_node = quantize_graph.create_constant_node(
"weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
g = graph_pb2.GraphDef()
g.node.extend([
input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
weight_2_node, matmul_2_node
])
# Test the graph
test_graph(g, {}, ["matmul_2"])
    # Verify that no redundant quantize/dequantize ops are inserted around the reshape.
eightbit_rewriter = quantize_graph.GraphRewriter(
g, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(1, ops.count("QuantizedReshape"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_quantize_array(self):
    # Test invalid parameters (empty array, or 0 buckets).
self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]),
2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_non_float_concat(self):
concat_dim = quantize_graph.create_constant_node(
"concat_dim", value=0, dtype=dtypes.int32, shape=[])
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
b = quantize_graph.create_constant_node(
"b",
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.int32,
shape=[2, 2, 3])
concat = quantize_graph.create_node("Concat", "concat",
[concat_dim.name, a.name, b.name])
quantize_graph.set_attr_int(concat, "N", 2)
quantize_graph.set_attr_dtype(concat, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([concat_dim, a, b, concat])
test_graph(g, {}, [concat.name])
def test_non_float_reshape(self):
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
shape = quantize_graph.create_constant_node(
"shape", value=[12], dtype=dtypes.int32, shape=[1])
reshape = quantize_graph.create_node("Reshape", "reshape",
[a.name, shape.name])
quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([a, shape, reshape])
test_graph(g, {}, [reshape.name])
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=0, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(
a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(
split_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node(
"Split", split_name, [split_constant_name, input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(
concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[concat_constant_name, split_name + ":0", split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name,
[a_constant_name, "^" + a_check_name, "^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
dtype=dtypes.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(
mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name,
value=[0.25, 0.5],
dtype=dtypes.float32,
shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(
beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(
gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name, [
input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name
])
quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=0, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=12, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[relu_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(
[input_node, relu_node, min_node, max_node, fake_quant_node])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
    # Verify no QuantizeV2 ops are needed and there is a single Dequantize at the end.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_quantized_input_range_errors(self):
with self.assertRaises(ValueError):
# Invalid mode.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
[0, 1])
with self.assertRaises(ValueError):
# Invalid range.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
def test_quantized_input_range_bias_add(self):
input_shape = [1, 1, 2, 6]
input_n = quantize_graph.create_node("Placeholder", "input", [])
quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(input_n, "shape", input_shape)
offset_n = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
[input_n.name, offset_n.name])
quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_n, offset_n, bias_add_n])
input_map = {
input_n.name + ":0":
np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [0, 12.])
def test_quantized_input_range_mat_mul(self):
shapes = [[3, 2], [2, 4]]
inputs = []
for i, shape in enumerate(shapes):
node = quantize_graph.create_node("Placeholder", "input_%s" % i, [])
quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(node, "shape", shape)
inputs.append(node)
mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
[n.name for n in inputs])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(inputs + [mat_mul_node])
input_map = {
inputs[0].name + ":0":
np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
inputs[1].name + ":0":
np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [0, 6.])
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
output_names, input_range):
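    """Checks the eightbit rewrite against float results, both with and without treating the inputs as already quantized to input_range."""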
if sys.version_info[0] == 3:
# uint8->quint8 conversion for numpy is not working currently.
return
quantized_input_map = {}
for k, v in input_map.items():
arr = [
int(
round((n - input_range[0]) * 255 / (input_range[1] - input_range[
0]))) for n in v.flat
]
arr = np.array(arr, np.uint8)
arr = arr.reshape(v.shape)
arr = arr.astype(dtypes.quint8.as_numpy_dtype)
quantized_input_map[k] = arr
output_tensors = [output_name + ":0" for output_name in output_names]
float_results = run_graph_def(float_graph_def, input_map, output_tensors)
# Quantize treating the input as quantized in range <input_range>.
rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
input_range)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, quantized_input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
# Quantize without treating input as quantized.
rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(
len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[bias_add_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([
input_node, offset_node, bias_add_node, min_node, max_node,
fake_quant_node
])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
    # Verify no QuantizeV2 ops are needed and there is a single Dequantize at the end.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-100, 100])
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# The fallback constants are not in the graph.
self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
def test_bias_add_w_fallback_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_node, offset_node, bias_add_node])
test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)
    # Verify there are no Quantize or RequantizationRange ops and a single
    # Dequantize op at the end.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-.5, 15.5])
eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# No RequantizationRange
self.assertEqual(0, ops.count("RequantizationRange"))
# The fallback constants are in the graph.
self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node(
"Dequantize", a_dequantize_name,
[a_constant_name, a_constant_min_name, a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node(
"QuantizeV2", a_quantize_name,
[a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node(
"Dequantize", b_dequantize_name,
[b_constant_name, b_constant_min_name, b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node(
"QuantizeV2", b_quantize_name,
[b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_quantize_name, b_quantize_name, a_quantize_name + ":1",
a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
graph_def.node.extend([mat_mul_node])
expected_output = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_constant_name, b_constant_name, a_constant_min_name,
a_constant_max_name, b_constant_min_name, b_constant_max_name
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
expected_output.node.extend([mat_mul_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
rewriter = quantize_graph.GraphRewriter(
graph_def, [mat_mul_name], quantized_input_range=None)
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
if __name__ == "__main__":
test.main()
|
litex_boards/platforms/xilinx_kcu105.py | smunaut/litex-boards | 177 | 3160 | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2017-2019 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk125", 0,
Subsignal("p", Pins("G10"), IOStandard("LVDS")),
Subsignal("n", Pins("F10"), IOStandard("LVDS"))
),
("clk300", 0,
Subsignal("p", Pins("AK17"), IOStandard("DIFF_SSTL12")),
Subsignal("n", Pins("AK16"), IOStandard("DIFF_SSTL12"))
),
("cpu_reset", 0, Pins("AN8"), IOStandard("LVCMOS18")),
# Leds
("user_led", 0, Pins("AP8"), IOStandard("LVCMOS18")),
("user_led", 1, Pins("H23"), IOStandard("LVCMOS18")),
("user_led", 2, Pins("P20"), IOStandard("LVCMOS18")),
("user_led", 3, Pins("P21"), IOStandard("LVCMOS18")),
("user_led", 4, Pins("N22"), IOStandard("LVCMOS18")),
("user_led", 5, Pins("M22"), IOStandard("LVCMOS18")),
("user_led", 6, Pins("R23"), IOStandard("LVCMOS18")),
("user_led", 7, Pins("P23"), IOStandard("LVCMOS18")),
# Buttons
("user_btn_c", 0, Pins("AE10"), IOStandard("LVCMOS18")),
("user_btn_n", 0, Pins("AD10"), IOStandard("LVCMOS18")),
("user_btn_s", 0, Pins("AF8"), IOStandard("LVCMOS18")),
("user_btn_w", 0, Pins("AF9"), IOStandard("LVCMOS18")),
("user_btn_e", 0, Pins("AE8"), IOStandard("LVCMOS18")),
# Switches
("user_dip_btn", 0, Pins("AN16"), IOStandard("LVCMOS12")),
("user_dip_btn", 1, Pins("AN19"), IOStandard("LVCMOS12")),
("user_dip_btn", 2, Pins("AP18"), IOStandard("LVCMOS12")),
("user_dip_btn", 3, Pins("AN14"), IOStandard("LVCMOS12")),
# SMA
("user_sma_clock", 0,
Subsignal("p", Pins("D23"), IOStandard("LVDS")),
Subsignal("n", Pins("C23"), IOStandard("LVDS"))
),
("user_sma_clock_p", 0, Pins("D23"), IOStandard("LVCMOS18")),
("user_sma_clock_n", 0, Pins("C23"), IOStandard("LVCMOS18")),
("user_sma_gpio", 0,
Subsignal("p", Pins("H27"), IOStandard("LVDS")),
Subsignal("n", Pins("G27"), IOStandard("LVDS"))
),
("user_sma_gpio_p", 0, Pins("H27"), IOStandard("LVCMOS18")),
("user_sma_gpio_n", 0, Pins("G27"), IOStandard("LVCMOS18")),
# I2C
("i2c", 0,
Subsignal("scl", Pins("J24")),
Subsignal("sda", Pins("J25")),
IOStandard("LVCMOS18")
),
# Serial
("serial", 0,
Subsignal("cts", Pins("L23")),
Subsignal("rts", Pins("K27")),
Subsignal("tx", Pins("K26")),
Subsignal("rx", Pins("G25")),
IOStandard("LVCMOS18")
),
# SPIFlash
("spiflash", 0, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("U7")),
Subsignal("dq", Pins("AC7 AB7 AA7 Y7")),
IOStandard("LVCMOS18")
),
("spiflash", 1, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("G26")),
Subsignal("dq", Pins("M20 L20 R21 R22")),
IOStandard("LVCMOS18")
),
# SDCard
("spisdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cs_n", Pins("AH8")),
Subsignal("mosi", Pins("AD9"), Misc("PULLUP")),
Subsignal("miso", Pins("AP9"), Misc("PULLUP")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
("sdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cmd", Pins("AD9"), Misc("PULLUP True")),
Subsignal("data", Pins("AP9 AN9 AH9 AH8"), Misc("PULLUP True")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
# Rotary Encoder
("rotary", 0,
Subsignal("a", Pins("Y21")),
Subsignal("b", Pins("AD26")),
Subsignal("push", Pins("AF28")),
IOStandard("LVCMOS18")
),
# HDMI
("hdmi", 0,
Subsignal("d", Pins(
"AK11 AP11 AP13 AN13 AN11 AM11 AN12 AM12",
"AL12 AK12 AL13 AK13 AD11 AH12 AG12 AJ11",
"AG10 AK8")),
Subsignal("de", Pins("AE11")),
Subsignal("clk", Pins("AF13")),
Subsignal("vsync", Pins("AH13")),
Subsignal("hsync", Pins("AE13")),
Subsignal("spdif", Pins("AE12")),
Subsignal("spdif_out", Pins("AF12")),
IOStandard("LVCMOS18")
),
# DDR4 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"AE17 AH17 AE18 AJ15 AG16 AL17 AK18 AG17",
"AF18 AH19 AF15 AD19 AJ14 AG19"),
IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("AF17 AL15"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("AG15"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("AF14"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("AG14"), IOStandard("SSTL12_DCI")), # A15
Subsignal("we_n", Pins("AD16"), IOStandard("SSTL12_DCI")), # A14
Subsignal("cs_n", Pins("AL19"), IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("AH14"), IOStandard("SSTL12_DCI")),
#Subsignal("ten", Pins("AH16"), IOStandard("SSTL12_DCI")),
#Subsignal("alert_n", Pins("AJ16"), IOStandard("SSTL12_DCI")),
#Subsignal("par", Pins("AD18"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("AD21 AE25 AJ21 AM21 AH26 AN26 AJ29 AL32"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
"AE23 AG20 AF22 AF20 AE22 AD20 AG22 AE20",
"AJ24 AG24 AJ23 AF23 AH23 AF24 AH22 AG25",
"AL22 AL25 AM20 AK23 AK22 AL24 AL20 AL23",
"AM24 AN23 AN24 AP23 AP25 AN22 AP24 AM22",
"AH28 AK26 AK28 AM27 AJ28 AH27 AK27 AM26",
"AL30 AP29 AM30 AN28 AL29 AP28 AM29 AN27",
"AH31 AH32 AJ34 AK31 AJ31 AJ30 AH34 AK32",
"AN33 AP33 AM34 AP31 AM32 AN31 AL34 AN32"),
IOStandard("POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("AG21 AH24 AJ20 AP20 AL27 AN29 AH33 AN34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("AH21 AJ25 AK20 AP21 AL28 AP30 AJ33 AP34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("clk_p", Pins("AE16"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_n", Pins("AE15"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cke", Pins("AD15"), IOStandard("SSTL12_DCI")),
Subsignal("odt", Pins("AJ18"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("AL18"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST"),
),
# PCIe
("pcie_x1", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2")),
Subsignal("rx_n", Pins("AB1")),
Subsignal("tx_p", Pins("AC4")),
Subsignal("tx_n", Pins("AC3"))
),
("pcie_x2", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2")),
Subsignal("rx_n", Pins("AB1 AD1")),
Subsignal("tx_p", Pins("AC4 AE4")),
Subsignal("tx_n", Pins("AC3 AE3"))
),
("pcie_x4", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5"))
),
("pcie_x8", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2 AJ4 AK2 AM2 AP2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1 AJ3 AK1 AM1 AP1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6 AK6 AL4 AM6 AN4")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5 AK5 AL3 AM5 AN3"))
),
# SGMII Clk
("sgmii_clock", 0,
Subsignal("p", Pins("P26"), IOStandard("LVDS_25")),
Subsignal("n", Pins("N26"), IOStandard("LVDS_25"))
),
# SI570
("si570_refclk", 0,
Subsignal("p", Pins("P6")),
Subsignal("n", Pins("P5"))
),
# SMA
("user_sma_mgt_refclk", 0,
Subsignal("p", Pins("V6")),
Subsignal("n", Pins("V5"))
),
("user_sma_mgt_tx", 0,
Subsignal("p", Pins("R4")),
Subsignal("n", Pins("R3"))
),
("user_sma_mgt_rx", 0,
Subsignal("p", Pins("P2")),
Subsignal("n", Pins("P1"))
),
# SFP
("sfp", 0,
Subsignal("txp", Pins("U4")),
Subsignal("txn", Pins("U3")),
Subsignal("rxp", Pins("T2")),
Subsignal("rxn", Pins("T1"))
),
("sfp_tx", 0,
Subsignal("p", Pins("U4")),
Subsignal("n", Pins("U3")),
),
("sfp_rx", 0,
Subsignal("p", Pins("T2")),
Subsignal("n", Pins("T1")),
),
("sfp_tx_disable_n", 0, Pins("AL8"), IOStandard("LVCMOS18")),
("sfp", 1,
Subsignal("txp", Pins("W4")),
Subsignal("txn", Pins("W3")),
Subsignal("rxp", Pins("V2")),
Subsignal("rxn", Pins("V1"))
),
("sfp_tx", 1,
Subsignal("p", Pins("W4")),
Subsignal("n", Pins("W3")),
),
("sfp_rx", 1,
Subsignal("p", Pins("V2")),
Subsignal("n", Pins("V1")),
),
("sfp_tx_disable_n", 1, Pins("D28"), IOStandard("LVCMOS18")),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
("HPC", {
"DP0_C2M_P" : "F6",
"DP0_C2M_N" : "F5",
"DP0_M2C_P" : "E4",
"DP0_M2C_N" : "E3",
"DP1_C2M_P" : "D6",
"DP1_C2M_N" : "D5",
"DP1_M2C_P" : "D2",
"DP1_M2C_N" : "D1",
"DP2_C2M_P" : "C4",
"DP2_C2M_N" : "C3",
"DP2_M2C_P" : "B2",
"DP2_M2C_N" : "B1",
"DP3_C2M_P" : "B6",
"DP3_C2M_N" : "B5",
"DP3_M2C_P" : "A4",
"DP3_M2C_N" : "A3",
"DP4_C2M_P" : "N4",
"DP4_C2M_N" : "N3",
"DP4_M2C_P" : "M2",
"DP4_M2C_N" : "M1",
"DP5_C2M_P" : "J4",
"DP5_C2M_N" : "J3",
"DP5_M2C_P" : "H2",
"DP5_M2C_N" : "H1",
"DP6_C2M_P" : "L4",
"DP6_C2M_N" : "L3",
"DP6_M2C_P" : "K2",
"DP6_M2C_N" : "K1",
"DP7_C2M_P" : "G4",
"DP7_C2M_N" : "G3",
"DP7_M2C_P" : "F2",
"DP7_M2C_N" : "F1",
"LA06_P" : "D13",
"LA06_N" : "C13",
"LA10_P" : "L8",
"LA10_N" : "K8",
"LA14_P" : "B10",
"LA14_N" : "A10",
"LA18_CC_P" : "E22",
"LA18_CC_N" : "E23",
"LA27_P" : "H21",
"LA27_N" : "G21",
"HA01_CC_P" : "E16",
"HA01_CC_N" : "D16",
"HA05_P" : "J15",
"HA05_N" : "J14",
"HA09_P" : "F18",
"HA09_N" : "F17",
"HA13_P" : "B14",
"HA13_N" : "A14",
"HA16_P" : "A19",
"HA16_N" : "A18",
"HA20_P" : "C19",
"HA20_N" : "B19",
"CLK1_M2C_P" : "E25",
"CLK1_M2C_N" : "D25",
"LA00_CC_P" : "H11",
"LA00_CC_N" : "G11",
"LA03_P" : "A13",
"LA03_N" : "A12",
"LA08_P" : "J8",
"LA08_N" : "H8",
"LA12_P" : "E10",
"LA12_N" : "D10",
"LA16_P" : "B9",
"LA16_N" : "A9",
"LA20_P" : "B24",
"LA20_N" : "A24",
"LA22_P" : "G24",
"LA22_N" : "F25",
"LA25_P" : "D20",
"LA25_N" : "D21",
"LA29_P" : "B20",
"LA29_N" : "A20",
"LA31_P" : "B25",
"LA31_N" : "A25",
"LA33_P" : "A27",
"LA33_N" : "A28",
"HA03_P" : "G15",
"HA03_N" : "G14",
"HA07_P" : "L19",
"HA07_N" : "L18",
"HA11_P" : "J19",
"HA11_N" : "J18",
"HA14_P" : "F15",
"HA14_N" : "F14",
"HA18_P" : "B17",
"HA18_N" : "B16",
"HA22_P" : "C18",
"HA22_N" : "C17",
"GBTCLK1_M2C_P" : "H6",
"GBTCLK1_M2C_N" : "H5",
"GBTCLK0_M2C_P" : "K6",
"GBTCLK0_M2C_N" : "K5",
"LA01_CC_P" : "G9",
"LA01_CC_N" : "F9",
"LA05_P" : "L13",
"LA05_N" : "K13",
"LA09_P" : "J9",
"LA09_N" : "H9",
"LA13_P" : "D9",
"LA13_N" : "C9",
"LA17_CC_P" : "D24",
"LA17_CC_N" : "C24",
"LA23_P" : "G22",
"LA23_N" : "F22",
"LA26_P" : "G20",
"LA26_N" : "F20",
"PG_M2C" : "L27",
"HA00_CC_P" : "G17",
"HA00_CC_N" : "G16",
"HA04_P" : "G19",
"HA04_N" : "F19",
"HA08_P" : "K18",
"HA08_N" : "K17",
"HA12_P" : "K16",
"HA12_N" : "J16",
"HA15_P" : "D14",
"HA15_N" : "C14",
"HA19_P" : "D19",
"HA19_N" : "D18",
"PRSNT_M2C_B" : "H24",
"CLK0_M2C_P" : "H12",
"CLK0_M2C_N" : "G12",
"LA02_P" : "K10",
"LA02_N" : "J10",
"LA04_P" : "L12",
"LA04_N" : "K12",
"LA07_P" : "F8",
"LA07_N" : "E8",
"LA11_P" : "K11",
"LA11_N" : "J11",
"LA15_P" : "D8",
"LA15_N" : "C8",
"LA19_P" : "C21",
"LA19_N" : "C22",
"LA21_P" : "F23",
"LA21_N" : "F24",
"LA24_P" : "E20",
"LA24_N" : "E21",
"LA28_P" : "B21",
"LA28_N" : "B22",
"LA30_P" : "C26",
"LA30_N" : "B26",
"LA32_P" : "E26",
"LA32_N" : "D26",
"HA02_P" : "H19",
"HA02_N" : "H18",
"HA06_P" : "L15",
"HA06_N" : "K15",
"HA10_P" : "H17",
"HA10_N" : "H16",
"HA17_CC_P" : "E18",
"HA17_CC_N" : "E17",
"HA21_P" : "E15",
"HA21_N" : "D15",
"HA23_P" : "B15",
"HA23_N" : "A15",
}
),
("LPC", {
"GBTCLK0_M2C_P" : "AA24",
"GBTCLK0_M2C_N" : "AA25",
"LA01_CC_P" : "W25",
"LA01_CC_N" : "Y25",
"LA05_P" : "V27",
"LA05_N" : "V28",
"LA09_P" : "V26",
"LA09_N" : "W26",
"LA13_P" : "AA20",
"LA13_N" : "AB20",
"LA17_CC_P" : "AA32",
"LA17_CC_N" : "AB32",
"LA23_P" : "AD30",
"LA23_N" : "AD31",
"LA26_P" : "AF33",
"LA26_N" : "AG34",
"CLK0_M2C_P" : "AA24",
"CLK0_M2C_N" : "AA25",
"LA02_P" : "AA22",
"LA02_N" : "AB22",
"LA04_P" : "U26",
"LA04_N" : "U27",
"LA07_P" : "V22",
"LA07_N" : "V23",
"LA11_P" : "V21",
"LA11_N" : "W21",
"LA15_P" : "AB25",
"LA15_N" : "AB26",
"LA19_P" : "AA29",
"LA19_N" : "AB29",
"LA21_P" : "AC33",
"LA21_N" : "AD33",
"LA24_P" : "AE32",
"LA24_N" : "AF32",
"LA28_P" : "V31",
"LA28_N" : "W31",
"LA30_P" : "Y31",
"LA30_N" : "Y32",
"LA32_P" : "W30",
"LA32_N" : "Y30",
"LA06_P" : "V29",
"LA06_N" : "W29",
"LA10_P" : "T22",
"LA10_N" : "T23",
"LA14_P" : "U21",
"LA14_N" : "U22",
"LA18_CC_P" : "AB30",
"LA18_CC_N" : "AB31",
"LA27_P" : "AG31",
"LA27_N" : "AG32",
"CLK1_M2C_P" : "AC31",
"CLK1_M2C_N" : "AC32",
"LA00_CC_P" : "W23",
"LA00_CC_N" : "W24",
"LA03_P" : "W28",
"LA03_N" : "Y28",
"LA08_P" : "U24",
"LA08_N" : "U25",
"LA12_P" : "AC22",
"LA12_N" : "AC23",
"LA16_P" : "AB21",
"LA16_N" : "AC21",
"LA20_P" : "AA34",
"LA20_N" : "AB34",
"LA22_P" : "AC34",
"LA22_N" : "AD34",
"LA25_P" : "AE33",
"LA25_N" : "AF34",
"LA29_P" : "U34",
"LA29_N" : "V34",
"LA31_P" : "V33",
"LA31_N" : "W34",
"LA33_P" : "W33",
"LA33_N" : "Y33",
}
),
("pmod0", "AK25 AN21 AH18 AM19 AE26 AF25 AE21 AM17"),
("pmod1", "AL14 AM14 AP16 AP15 AM16 AM15 AN18 AN17"),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
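    # KCU105 board: Kintex UltraScale XCKU040, FFVA1156 package, speed grade -2 (see __init__ below).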
default_clk_name = "clk125"
default_clk_period = 1e9/125e6
def __init__(self):
XilinxPlatform.__init__(self, "xcku040-ffva1156-2-e", _io, _connectors, toolchain="vivado")
def create_programmer(self):
return VivadoProgrammer()
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk125", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("clk300", loose=True), 1e9/300e6)
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 44]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 45]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 46]")
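# Example usage (sketch, not part of the original file; assumes a LiteX/Migen top-level module `top`):
#   platform = Platform()
#   platform.build(top)  # run Vivado synthesis/place-and-route, producing build/top.bit
#   platform.create_programmer().load_bitstream("build/top.bit")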
|
dabl/plot/tests/test_supervised.py | nrohan09-cloud/dabl | 500 | 3180 | import pytest
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from sklearn.datasets import (make_regression, make_blobs, load_digits,
fetch_openml, load_diabetes)
from sklearn.preprocessing import KBinsDiscretizer
from dabl.preprocessing import clean, detect_types, guess_ordinal
from dabl.plot.supervised import (
plot, plot_classification_categorical,
plot_classification_continuous, plot_regression_categorical,
plot_regression_continuous)
from dabl.utils import data_df_from_bunch
from dabl import set_config
# FIXME: check that target is not y but a column name
@pytest.mark.filterwarnings('ignore:the matrix subclass')
@pytest.mark.parametrize("continuous_features, categorical_features, task",
itertools.product([0, 1, 3, 100], [0, 1, 3, 100],
['classification', 'regression']))
def test_plots_smoke(continuous_features, categorical_features, task):
# simple smoke test
# should be parametrized
n_samples = 100
X_cont, y_cont = make_regression(
n_samples=n_samples, n_features=continuous_features,
n_informative=min(continuous_features, 2))
X_cat, y_cat = make_regression(
n_samples=n_samples, n_features=categorical_features,
n_informative=min(categorical_features, 2))
if X_cat.shape[1] > 0:
X_cat = KBinsDiscretizer(encode='ordinal').fit_transform(X_cat)
cont_columns = ["asdf_%d_cont" % i for i in range(continuous_features)]
df_cont = pd.DataFrame(X_cont, columns=cont_columns)
if categorical_features > 0:
cat_columns = ["asdf_%d_cat" % i for i in range(categorical_features)]
df_cat = pd.DataFrame(X_cat, columns=cat_columns).astype('int')
df_cat = df_cat.astype("category")
X_df = pd.concat([df_cont, df_cat], axis=1)
else:
X_df = df_cont
assert(X_df.shape[1] == continuous_features + categorical_features)
X_clean = clean(X_df.copy())
y = y_cont + y_cat
if X_df.shape[1] == 0:
y = np.random.uniform(size=n_samples)
if task == "classification":
y = np.digitize(y, np.percentile(y, [5, 10, 60, 85]))
X_clean['target'] = y
if task == "classification":
X_clean['target'] = X_clean['target'].astype('category')
types = detect_types(X_clean)
column_types = types.T.idxmax()
assert np.all(column_types[:continuous_features] == 'continuous')
assert np.all(column_types[continuous_features:-1] == 'categorical')
if task == "classification":
assert column_types[-1] == 'categorical'
else:
assert column_types[-1] == 'continuous'
plot(X_clean, target_col='target')
plt.close("all")
@pytest.mark.parametrize("add, feature_type, target_type",
itertools.product([0, .1],
['continuous', 'categorical'],
['continuous', 'categorical']))
def test_type_hints(add, feature_type, target_type):
X = pd.DataFrame(np.random.randint(4, size=100)) + add
X['target'] = np.random.uniform(size=100)
plot(X, type_hints={0: feature_type,
'target': target_type},
target_col='target')
# get title of figure
text = plt.gcf()._suptitle.get_text()
assert feature_type.capitalize() in text
ax = plt.gca()
    # one of the labels is 'target' iff regression
labels = ax.get_ylabel() + ax.get_xlabel()
assert ('target' in labels) == (target_type == 'continuous')
plt.close("all")
def test_float_classification_target():
# check we can plot even if we do classification with a float target
X, y = make_blobs()
data = pd.DataFrame(X)
data['target'] = y.astype(np.float)
types = detect_types(data)
assert types.categorical['target']
plot(data, target_col='target')
# same with "actual float" - we need to specify classification for that :-/
data['target'] = y.astype(np.float) + .2
plot(data, target_col='target', type_hints={'target': 'categorical'})
plt.close("all")
@pytest.mark.filterwarnings('ignore:Discarding near-constant')
def test_plot_classification_n_classes():
X, y = make_blobs()
X = pd.DataFrame(X)
X['target'] = 0
with pytest.raises(ValueError, match="Less than two classes"):
plot_classification_categorical(X, 'target')
with pytest.raises(ValueError, match="Less than two classes"):
plot_classification_continuous(X, 'target')
def test_plot_wrong_target_type():
X, y = make_blobs()
X = pd.DataFrame(X)
X['target'] = y
with pytest.raises(ValueError, match="need continuous"):
plot_regression_categorical(X, 'target')
with pytest.raises(ValueError, match="need continuous"):
plot_regression_continuous(X, 'target')
X['target'] = X[0]
with pytest.raises(ValueError, match="need categorical"):
plot_classification_categorical(X, 'target')
with pytest.raises(ValueError, match="need categorical"):
plot_classification_continuous(X, 'target')
def test_plot_target_low_card_int():
data = load_digits()
df = data_df_from_bunch(data)
plot(df[::10], target_col='target')
def test_plot_X_y():
X, y = make_blobs()
X = pd.DataFrame(X)
plot(X, y)
def test_plot_regression_numpy():
X, y = make_regression()
plot(X, y)
def test_plot_lda_binary():
X, y = make_blobs(centers=2)
X = pd.DataFrame(X)
plot(X, y, univariate_plot='kde')
def test_plot_int_column_name():
X, y = make_blobs()
X = pd.DataFrame(X)
X[3] = y
plot(X, target_col=3)
def test_negative_ordinal():
# check that a low card int with negative values is plotted correctly
data = pd.DataFrame([np.random.randint(0, 10, size=1000) - 5,
np.random.randint(0, 2, size=1000)]).T
# ensure first column is low_card_int
assert (detect_types(data).T.idxmax()
== ['low_card_int', 'categorical']).all()
assert guess_ordinal(data[0])
# smoke test
plot(data, target_col=1)
def test_large_ordinal():
# check that large integers don't bring us down (bincount memory error)
# here some random phone numbers
assert not guess_ordinal(pd.Series([6786930208, 2142878625, 9106275431]))
def test_plot_classification_continuous():
data = fetch_openml('MiceProtein')
df = data_df_from_bunch(data)
# only univariate plots
figures = plot_classification_continuous(df, target_col='target',
plot_pairwise=False)
assert len(figures) == 1
# top 10 axes
assert len(figures[0].get_axes()) == 10
# six is the minimum number of features for histograms
# (last column is target)
figures = plot_classification_continuous(df.iloc[:, -7:],
target_col='target',
plot_pairwise=False)
assert len(figures) == 1
assert len(figures[0].get_axes()) == 6
# for 5 features, do full pairplot
figures = plot_classification_continuous(df.iloc[:, -6:],
target_col='target',
plot_pairwise=False)
assert len(figures) == 1
# diagonal has twin axes
assert len(figures[0].get_axes()) == 5 * 5 + 5
# also do pairwise plots
figures = plot_classification_continuous(df, target_col='target',
random_state=42)
# univariate, pairwise, pca, lda
assert len(figures) == 4
# univariate
axes = figures[0].get_axes()
assert len(axes) == 10
# known result
assert axes[0].get_xlabel() == "SOD1_N"
# bar plot never has ylabel
assert axes[0].get_ylabel() == ""
# pairwise
axes = figures[1].get_axes()
assert len(axes) == 4
# known result
assert axes[0].get_xlabel() == "SOD1_N"
assert axes[0].get_ylabel() == 'S6_N'
# PCA
axes = figures[2].get_axes()
assert len(axes) == 4
# known result
assert axes[0].get_xlabel() == "PCA 1"
assert axes[0].get_ylabel() == 'PCA 5'
# LDA
axes = figures[3].get_axes()
assert len(axes) == 4
# known result
assert axes[0].get_xlabel() == "LDA 0"
assert axes[0].get_ylabel() == 'LDA 1'
def test_plot_string_target():
X, y = make_blobs(n_samples=30)
data = pd.DataFrame(X)
y = pd.Series(y)
y[y == 0] = 'a'
y[y == 1] = 'b'
y[y == 2] = 'c'
data['target'] = y
plot(data, target_col='target')
def test_na_vals_reg_plot_raise_warning():
X, y = load_diabetes(return_X_y=True)
X = pd.DataFrame(X)
y[::50] = np.NaN
X['target_col'] = y
with pytest.warns(UserWarning, match="Missing values in target_col have "
"been removed for regression"):
plot(X, 'target_col')
with pytest.warns(UserWarning, match="Missing values in target_col have "
"been removed for regression"):
plot_regression_continuous(X, 'target_col')
with pytest.warns(UserWarning, match="Missing values in target_col have "
"been removed for regression"):
plot_regression_categorical(X, 'target_col')
def test_plot_regression_continuous_with_target_outliers():
df = pd.DataFrame(
data={
"feature": np.random.randint(low=1, high=100, size=200),
# target values are bound between 50 and 100
"target": np.random.randint(low=50, high=100, size=200)
}
)
# append single outlier record with target value 0
df = df.append({"feature": 50, "target": 0}, ignore_index=True)
with pytest.warns(
UserWarning,
match="Dropped 1 outliers in column target."
):
plot_regression_continuous(df, 'target')
def test_plot_regression_categorical_missing_value():
df = pd.DataFrame({'y': np.random.normal(size=300)})
df.loc[100:200, 'y'] += 1
df.loc[200:300, 'y'] += 2
df['x'] = 'a'
df.loc[100:200, 'x'] = 'b'
df.loc[200:300, 'x'] = np.NaN
res = plot(df, target_col='y')
assert len(res[1][0, 0].get_yticklabels()) == 3
assert res[1][0, 0].get_yticklabels()[2].get_text() == 'dabl_mi...'
def test_label_truncation():
a = ('a_really_long_name_that_would_mess_up_the_layout_a_lot'
'_by_just_being_very_long')
b = ('the_target_that_has_an_equally_long_name_which_would_'
'mess_up_everything_as_well_but_in_different_places')
df = pd.DataFrame({a: np.random.uniform(0, 1, 1000)})
df[b] = df[a] + np.random.uniform(0, 0.1, 1000)
res = plot_regression_continuous(df, target_col=b)
assert res[0, 0].get_ylabel() == 'the_target_that_h...'
assert res[0, 0].get_xlabel() == 'a_really_long_nam...'
set_config(truncate_labels=False)
res = plot_regression_continuous(df, target_col=b)
assert res[0, 0].get_ylabel() == b
assert res[0, 0].get_xlabel() == a
set_config(truncate_labels=True)
|
tests/dummy_repo/tvm/python/tvm/api.py | csullivan/ffi-navigator | 148 | 3192 | from ._ffi.base import string_types
from ._ffi.object import register_object, Object
from ._ffi.node import register_node, NodeBase
from ._ffi.node import convert_to_node as _convert_to_node
from ._ffi.node_generic import _scalar_type_inference
from ._ffi.function import Function
from ._ffi.function import _init_api, register_func, get_global_func, extract_ext_funcs
from ._ffi.function import convert_to_tvm_func as _convert_tvm_func
from ._ffi.runtime_ctypes import TVMType
from . import _api_internal
from . import make as _make
from . import expr as _expr
from . import tensor as _tensor
from . import schedule as _schedule
from . import container as _container
from . import tag as _tag
int8 = "int8"
int32 = "int32"
float32 = "float32"
handle = "handle"
def min_value(dtype):
return _api_internal._min_value(dtype)
|
torchattacks/attacks/multiattack.py | Harry24k/adversarial-attacks-pytorch | 782 | 3193 | <filename>torchattacks/attacks/multiattack.py
import copy
import torch
from ..attack import Attack
class MultiAttack(Attack):
r"""
MultiAttack is a class to attack a model with various attacks against the same images and labels.
Arguments:
model (nn.Module): model to attack.
attacks (list): list of attacks.
Examples::
>>> atk1 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
>>> atk2 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
>>> atk = torchattacks.MultiAttack([atk1, atk2])
>>> adv_images = atk(images, labels)
"""
def __init__(self, attacks, verbose=False):
# Check validity
ids = []
for attack in attacks:
ids.append(id(attack.model))
if len(set(ids)) != 1:
raise ValueError("At least one of attacks is referencing a different model.")
super().__init__("MultiAttack", attack.model)
self.attacks = attacks
self.verbose = verbose
self._accumulate_multi_atk_records = False
self._multi_atk_records = [0.0]
self._supported_mode = ['default']
def forward(self, images, labels):
r"""
Overridden.
"""
batch_size = images.shape[0]
fails = torch.arange(batch_size).to(self.device)
final_images = images.clone().detach().to(self.device)
labels = labels.clone().detach().to(self.device)
multi_atk_records = [batch_size]
for _, attack in enumerate(self.attacks):
adv_images = attack(images[fails], labels[fails])
outputs = self.model(adv_images)
_, pre = torch.max(outputs.data, 1)
corrects = (pre == labels[fails])
wrongs = ~corrects
succeeds = torch.masked_select(fails, wrongs)
succeeds_of_fails = torch.masked_select(torch.arange(fails.shape[0]).to(self.device), wrongs)
final_images[succeeds] = adv_images[succeeds_of_fails]
fails = torch.masked_select(fails, corrects)
multi_atk_records.append(len(fails))
if len(fails) == 0:
break
if self.verbose:
print(self._return_sr_record(multi_atk_records))
if self._accumulate_multi_atk_records:
self._update_multi_atk_records(multi_atk_records)
return final_images
def _clear_multi_atk_records(self):
self._multi_atk_records = [0.0]
def _covert_to_success_rates(self, multi_atk_records):
sr = [((1-multi_atk_records[i]/multi_atk_records[0])*100) for i in range(1, len(multi_atk_records))]
return sr
def _return_sr_record(self, multi_atk_records):
sr = self._covert_to_success_rates(multi_atk_records)
return "Attack success rate: "+" | ".join(["%2.2f %%"%item for item in sr])
def _update_multi_atk_records(self, multi_atk_records):
for i, item in enumerate(multi_atk_records):
self._multi_atk_records[i] += item
def save(self, data_loader, save_path=None, verbose=True, return_verbose=False):
r"""
Overridden.
"""
self._clear_multi_atk_records()
verbose = self.verbose
self.verbose = False
self._accumulate_multi_atk_records = True
for i, attack in enumerate(self.attacks):
self._multi_atk_records.append(0.0)
rob_acc, l2, elapsed_time = super().save(data_loader, save_path, verbose, return_verbose)
sr = self._covert_to_success_rates(self._multi_atk_records)
self._clear_multi_atk_records()
self._accumulate_multi_atk_records = False
self.verbose = verbose
if return_verbose:
return rob_acc, sr, l2, elapsed_time
def _save_print(self, progress, rob_acc, l2, elapsed_time, end):
r"""
Overridden.
"""
print("- Save progress: %2.2f %% / Robust accuracy: %2.2f %%"%(progress, rob_acc)+\
" / "+self._return_sr_record(self._multi_atk_records)+\
' / L2: %1.5f (%2.3f it/s) \t'%(l2, elapsed_time), end=end)
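# --- Illustrative usage sketch (editorial addition, not part of the upstream
# torchattacks source). FGSM and MultiAttack are real torchattacks classes,
# but the tiny model and the random batch below are made up purely to
# exercise the API; treat the exact keyword arguments as indicative.
if __name__ == "__main__":
    import torch.nn as nn
    import torchattacks
    # Stand-in classifier and a random batch of 32x32 RGB "images".
    model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10)).eval()
    images = torch.rand(4, 3, 32, 32)
    labels = torch.randint(0, 10, (4,))
    # Two attacks against the same model; MultiAttack tries them in order and
    # keeps, per image, the first adversarial example that fools the model.
    atk1 = torchattacks.FGSM(model, eps=4 / 255)
    atk2 = torchattacks.FGSM(model, eps=8 / 255)
    multi = MultiAttack([atk1, atk2], verbose=True)
    adv_images = multi(images, labels)
    print(adv_images.shape)  # same shape as the clean images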
|
models/LRF_COCO_300.py | vaesl/LRF-Net | 180 | 3205 | import torch
import torch.nn as nn
import os
import torch.nn.functional as F
class LDS(nn.Module):
def __init__(self,):
super(LDS, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
def forward(self, x):
x_pool1 = self.pool1(x)
x_pool2 = self.pool2(x_pool1)
x_pool3 = self.pool3(x_pool2)
return x_pool3
class ConvBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(ConvBlock, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=False) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class LSN_init(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_init, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1),
ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class LSN_later(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_later, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class IBN(nn.Module):
def __init__(self, out_planes, bn=True):
super(IBN, self).__init__()
self.out_channels = out_planes
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
def forward(self, x):
if self.bn is not None:
x = self.bn(x)
return x
class One_Three_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(One_Three_Conv, self).__init__()
self.out_channels = out_planes
inter_planes = in_planes // 4
self.single_branch = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class Relu_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Relu_Conv, self).__init__()
self.out_channels = out_planes
self.relu = nn.ReLU(inplace=False)
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
def forward(self, x):
x = self.relu(x)
out = self.single_branch(x)
return out
class Ds_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)):
super(Ds_Conv, self).__init__()
self.out_channels = out_planes
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class LRFNet(nn.Module):
"""LRFNet for object detection
The network is based on the SSD architecture.
Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(LRFNet, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.size = size
# vgg network
self.base = nn.ModuleList(base)
self.lds = LDS()
# convs for merging the lsn and ssd features
self.Norm1 = Relu_Conv(512, 512, stride=1)
self.Norm2 = Relu_Conv(1024, 1024, stride=1)
self.Norm3 = Relu_Conv(512, 512, stride=1)
self.Norm4 = Relu_Conv(256, 256, stride=1)
# convs for generate the lsn features
self.icn1 = LSN_init(3, 512, stride=1)
self.icn2 = LSN_later(128, 1024, stride=2)
self.icn3 = LSN_later(256, 512, stride=2)
# convs with s=2 to downsample the features
self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1))
self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1))
self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1))
# convs to reduce the feature dimensions of current level
self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1)
self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1)
self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1)
self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1)
self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.convert1 = ConvBlock(384, 256, kernel_size=1)
self.convert2 = ConvBlock(256, 512, kernel_size=1)
self.convert3 = ConvBlock(128, 256, kernel_size=1)
# convs to merge the features of the current and higher level features
self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1)
self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.ibn1 = IBN(512, bn=True)
self.ibn2 = IBN(1024, bn=True)
self.relu = nn.ReLU(inplace=False)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if self.phase == 'test':
self.softmax = nn.Softmax()
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
new_sources = list()
# apply lds to the initial image
x_pool = self.lds(x)
# apply vgg up to conv4_3
for k in range(22):
x = self.base[k](x)
conv4_3_bn = self.ibn1(x)
x_pool1_skip, x_pool1_icn = self.icn1(x_pool)
s = self.Norm1(conv4_3_bn * x_pool1_icn)
# apply vgg up to fc7
for k in range(22, 34):
x = self.base[k](x)
conv7_bn = self.ibn2(x)
x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)
p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)
x = self.base[34](x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = v(x)
if k == 0:
x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)
w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)
elif k == 2:
q = self.Norm4(self.dsc3(w) + x)
sources.append(q)
elif k == 5 or k == 7:
sources.append(x)
else:
pass
# project the forward features into lower dimension.
tmp1 = self.proj1(p)
tmp2 = self.proj2(w)
tmp3 = self.proj3(q)
# The conv4_3 level
proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear')
proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear')
proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear')
proj = torch.cat([proj1, proj2, proj3], dim=1)
agent1 = self.agent1(s)
convert1 = self.convert1(proj)
pred1 = torch.cat([agent1, convert1], dim=1)
pred1 = self.merge1(pred1)
new_sources.append(pred1)
# The fc_7 level
proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear')
proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear')
proj = torch.cat([proj2, proj3], dim=1)
agent2 = self.agent2(p)
convert2 = self.convert2(proj)
pred2 = torch.cat([agent2, convert2], dim=1)
pred2 = self.merge2(pred2)
new_sources.append(pred2)
# The conv8 level
proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear')
proj = proj3
agent3 = self.agent3(w)
convert3 = self.convert3(proj)
pred3 = torch.cat([agent3, convert3], dim=1)
pred3 = self.merge3(pred3)
new_sources.append(pred3)
for prediction in sources:
new_sources.append(prediction)
# apply multibox head to source layers
for (x, l, c) in zip(new_sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext == '.pkl' or ext == '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512]}
def add_extras(size, cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
if in_channels == 256 and size == 512:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
else:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
in_channels = v
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
return layers
extras = {
'300': [1024, 'S', 512, 'S', 256]}
def multibox(size, vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [1, -2]
for k, v in enumerate(vgg_source):
if k == 0:
loc_layers += [nn.Conv2d(512,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers +=[nn.Conv2d(512,
cfg[k] * num_classes, kernel_size=3, padding=1)]
else:
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
i = 2
indicator = 3
for k, v in enumerate(extra_layers):
if (k < indicator+1 and k % 2 == 0) or (k > indicator+1 and k % 2 != 0):
loc_layers += [nn.Conv2d(v.out_channels, cfg[i]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[i]
* num_classes, kernel_size=3, padding=1)]
i += 1
return vgg, extra_layers, (loc_layers, conf_layers)
mbox = {
'300': [6, 6, 6, 6, 4, 4]}
def build_net(phase, size=300, num_classes=81):
if size != 300:
print("Error: The input image size is not supported!")
return
return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3),
add_extras(size, extras[str(size)], 1024),
mbox[str(size)], num_classes), num_classes)
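# --- Illustrative sketch (editorial addition, not part of the upstream
# LRF-Net source): builds the 300x300 network in "train" phase and pushes a
# random, untrained batch through it just to show the expected input/output
# shapes. Prior-box generation and detection post-processing are omitted.
if __name__ == "__main__":
    net = build_net("train", size=300, num_classes=81)
    dummy = torch.randn(2, 3, 300, 300)
    loc_preds, conf_preds = net(dummy)
    # loc: [batch, num_priors, 4], conf: [batch, num_priors, num_classes]
    print(loc_preds.shape, conf_preds.shape)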
|
notification/app/node_modules/hiredis/binding.gyp | c2gconsulting/bulkpay | 208 | 3231 | {
'targets': [
{
'target_name': 'hiredis',
'sources': [
'src/hiredis.cc'
, 'src/reader.cc'
],
'include_dirs': ["<!(node -e \"require('nan')\")"],
'dependencies': [
'deps/hiredis.gyp:hiredis-c'
],
'defines': [
'_GNU_SOURCE'
],
'cflags': [
'-Wall',
'-O3'
]
}
]
}
|
recipes/cxxopts/all/conanfile.py | dvirtz/conan-center-index | 562 | 3234 | import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class CxxOptsConan(ConanFile):
name = "cxxopts"
homepage = "https://github.com/jarro2783/cxxopts"
url = "https://github.com/conan-io/conan-center-index"
description = "Lightweight C++ option parser library, supporting the standard GNU style syntax for options."
license = "MIT"
topics = ("conan", "option-parser", "positional-arguments ", "header-only")
settings = "compiler"
options = { "unicode": [True, False] }
default_options = { "unicode": False }
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _minimum_cpp_standard(self):
return 11
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"gcc": "5",
"clang": "3.9",
"apple-clang": "8",
}
def configure(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, self._minimum_cpp_standard)
min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
else:
if tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(
self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))
def requirements(self):
if self.options.unicode:
self.requires("icu/64.2")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("{}.hpp".format(self.name), dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_id(self):
self.info.header_only()
def package_info(self):
if self.options.unicode:
self.cpp_info.defines = ["CXXOPTS_USE_UNICODE"]
|
mmdet/ops/orn/functions/__init__.py | JarvisUSTC/DARDet | 274 | 3253 | <filename>mmdet/ops/orn/functions/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from .active_rotating_filter import active_rotating_filter
from .active_rotating_filter import ActiveRotatingFilter
from .rotation_invariant_encoding import rotation_invariant_encoding
from .rotation_invariant_encoding import RotationInvariantEncoding
from .rotation_invariant_pooling import RotationInvariantPooling
__all__ = ['ActiveRotatingFilter', 'active_rotating_filter', 'rotation_invariant_encoding', 'RotationInvariantEncoding', 'RotationInvariantPooling'] |
setup.py | Liang813/einops | 4,738 | 3262 | <gh_stars>1000+
__author__ = '<NAME>'
from setuptools import setup
setup(
name="einops",
version='0.3.2',
description="A new flavour of deep learning operations",
long_description=open('README.md', encoding='utf-8').read(),
long_description_content_type='text/markdown',
url='https://github.com/arogozhnikov/einops',
author='<NAME>',
packages=['einops', 'einops.layers'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3',
],
keywords='deep learning, neural networks, tensor manipulation, machine learning, '
'scientific computations, einops',
install_requires=[
# no run-time or installation-time dependencies
],
)
|
src/finn/custom_op/fpgadataflow/streamingfifo.py | AlexMontgomerie/finn | 283 | 3309 | # Copyright (c) 2020, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import numpy as np
from shutil import copy
import subprocess
import math
import warnings
from finn.custom_op.fpgadataflow.hlscustomop import HLSCustomOp
from finn.core.datatype import DataType
from onnx import TensorProto, helper
from finn.util.data_packing import npy_to_rtlsim_input, rtlsim_output_to_npy
from . import templates
class StreamingFIFO(HLSCustomOp):
def __init__(self, onnx_node):
super().__init__(onnx_node)
self.strm_fifo_wrapper = templates.strm_fifo_wrapper
def get_nodeattr_types(self):
my_attrs = {
# FIFO depth
"depth": ("i", True, 0),
# folded shape of input/output
"folded_shape": ("ints", True, []),
# FINN DataTypes for inputs/outputs
"dataType": ("s", True, ""),
# Toggle between hls or IPI implementation
# rtl - use the hls generated IP during stitching
# vivado - use the AXI Infrastructure FIFO
"impl_style": ("s", False, "rtl", {"rtl", "vivado"}),
# FPGA resource type for FIFOs when impl_style is vivado
# auto -- let Vivado decide
# block -- use BRAM
# distributed -- use LUTRAM
# ultra -- use URAM (on UltraScale+)
"ram_style": (
"s",
False,
"auto",
{"auto", "block", "distributed", "ultra"},
),
}
my_attrs.update(super().get_nodeattr_types())
return my_attrs
def make_shape_compatible_op(self, model):
exp_ishape = self.get_normal_input_shape()
oshape = self.get_normal_output_shape()
ishape = tuple(model.get_tensor_shape(self.onnx_node.input[0]))
assert ishape == tuple(exp_ishape), "Unexpect input shape for StreamingFIFO."
# implement tensor with correct shape
values = np.random.randn(*oshape).astype(np.float32)
return helper.make_node(
"Constant",
inputs=[],
outputs=[self.onnx_node.output[0]],
value=helper.make_tensor(
name="const_tensor",
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
def infer_node_datatype(self, model):
node = self.onnx_node
idt = model.get_tensor_datatype(node.input[0])
if idt != self.get_input_datatype():
warn_str = "inputDataType changing for %s: %s -> %s " % (
node.name,
str(self.get_input_datatype()),
str(idt),
)
warnings.warn(warn_str)
self.set_nodeattr("dataType", idt.name)
# data type stays the same
model.set_tensor_datatype(node.output[0], idt)
def verify_node(self):
pass
def get_verilog_top_module_name(self):
"Return the Verilog top module name for this node."
node = self.onnx_node
prefixed_top_name = "%s" % (node.name)
return prefixed_top_name
def code_generation_ipgen(self, model, fpgapart, clk):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
os.makedirs(verilog_dir)
# copy Q_srl.v from finn-rtllib to verilog directory
memstream_dir = "/workspace/finn/finn-rtllib/memstream/hdl/"
Q_file = os.path.join(memstream_dir, "Q_srl.v")
copy(Q_file, verilog_dir)
# empty code gen dictionary for new entries
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
self.code_gen_dict["$LAYER_NAME$"] = [
"{}_{}".format(self.onnx_node.name, self.onnx_node.name)
]
# make instream width a multiple of 8 for axi interface
in_width = self.get_instream_width_padded()
count_width = int(self.get_nodeattr("depth") - 1).bit_length()
self.code_gen_dict["$COUNT_RANGE$"] = ["[{}:0]".format(count_width - 1)]
self.code_gen_dict["$IN_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$OUT_RANGE$"] = ["[{}:0]".format(in_width - 1)]
self.code_gen_dict["$WIDTH$"] = [str(in_width)]
self.code_gen_dict["$DEPTH$"] = [str(self.get_nodeattr("depth"))]
template = self.strm_fifo_wrapper
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "{}.v".format(self.onnx_node.name)), "w")
f.write(template)
f.close()
self.code_gen_dict.clear()
def ipgen_singlenode_code(self):
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
verilog_dir = "{}/project_{}/sol1/impl/verilog".format(
code_gen_dir, self.onnx_node.name
)
# prepare the IP packaging tcl template
template = templates.ip_package_tcl
self.code_gen_dict.clear()
self.code_gen_dict["$TOPNAME$"] = ["{}".format(self.onnx_node.name)]
# note: setting the root dir as absolute can cause path problems
# the ipgen script will be invoked from the sources dir so root_dir=. is OK
self.code_gen_dict["$VERILOG_DIR$"] = ["."]
for key in self.code_gen_dict:
# transform list into long string separated by '\n'
code_gen_line = "\n".join(self.code_gen_dict[key])
template = template.replace(key, code_gen_line)
f = open(os.path.join(verilog_dir, "package_ip.tcl"), "w")
f.write(template)
f.close()
# create a shell script and call Vivado to invoke the IP pkg script
make_project_sh = verilog_dir + "/make_ip.sh"
working_dir = os.environ["PWD"]
with open(make_project_sh, "w") as f:
f.write("#!/bin/bash \n")
f.write("cd {}\n".format(verilog_dir))
f.write("vivado -mode batch -source package_ip.tcl\n")
f.write("cd {}\n".format(working_dir))
bash_command = ["bash", make_project_sh]
process_compile = subprocess.Popen(bash_command, stdout=subprocess.PIPE)
process_compile.communicate()
# set ipgen_path and ip_path to point to the new packaged IP
self.set_nodeattr("ipgen_path", verilog_dir)
self.set_nodeattr("ip_path", verilog_dir)
vlnv = "xilinx.com:hls:%s:1.0" % (self.onnx_node.name)
self.set_nodeattr("ip_vlnv", vlnv)
self.code_gen_dict.clear()
def get_normal_input_shape(self):
depth = self.get_nodeattr("depth")
# depth has to be between 2 and 256 with the current
# StreamingFIFO implementation
assert depth >= 2, """Depth is too low"""
if depth > 256 and self.get_nodeattr("impl_style") == "rtl":
warnings.warn(
"Depth is high, set between 2 and 256 for efficient SRL implementation"
)
# derive normal shape from folded shape
# StreamingFIFOs are inserted in between fpgadataflow nodes
# the folded shape could be for example (1, nf, pe)
# with nf (neuron folding): mh // pe
# the normal input shape is in this case (1, mh)
# so to achieve this the two inner dimensions are multiplied
# and together with all previous dimensions
# this gives the normal input shape
folded_shape = self.get_nodeattr("folded_shape")
# extract inner dimension
inner_dim = folded_shape[-1]
# multiply with the next inner dimension
folding_factor = folded_shape[-2] * inner_dim
normal_ishape = []
# create the normal_ishape
for i in range(len(folded_shape) - 2):
normal_ishape.append(folded_shape[i])
normal_ishape.append(folding_factor)
return normal_ishape
def get_normal_output_shape(self):
return self.get_normal_input_shape()
def get_folded_input_shape(self):
return self.get_nodeattr("folded_shape")
def get_folded_output_shape(self):
return self.get_nodeattr("folded_shape")
def get_instream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def get_outstream_width(self):
dtype = DataType[self.get_nodeattr("dataType")]
folded_shape = self.get_nodeattr("folded_shape")
in_width = folded_shape[-1] * dtype.bitwidth()
return in_width
def execute_node(self, context, graph):
mode = self.get_nodeattr("exec_mode")
node = self.onnx_node
inp = context[node.input[0]]
exp_shape = self.get_normal_input_shape()
if mode == "cppsim":
output = inp
output = np.asarray([output], dtype=np.float32).reshape(*exp_shape)
context[node.output[0]] = output
elif mode == "rtlsim":
code_gen_dir = self.get_nodeattr("code_gen_dir_ipgen")
# create a npy file for the input of the node
assert (
str(inp.dtype) == "float32"
), """Input datatype is
not float32 as expected."""
expected_inp_shape = self.get_folded_input_shape()
reshaped_input = inp.reshape(expected_inp_shape)
if DataType[self.get_nodeattr("dataType")] == DataType.BIPOLAR:
# store bipolar activations as binary
reshaped_input = (reshaped_input + 1) / 2
export_idt = DataType.BINARY
else:
export_idt = DataType[self.get_nodeattr("dataType")]
# make copy before saving the array
reshaped_input = reshaped_input.copy()
np.save(os.path.join(code_gen_dir, "input_0.npy"), reshaped_input)
sim = self.get_rtlsim()
nbits = self.get_instream_width()
inp = npy_to_rtlsim_input(
"{}/input_0.npy".format(code_gen_dir), export_idt, nbits
)
super().reset_rtlsim(sim)
super().toggle_clk(sim)
output = self.rtlsim(sim, inp)
odt = DataType[self.get_nodeattr("dataType")]
target_bits = odt.bitwidth()
packed_bits = self.get_outstream_width()
out_npy_path = "{}/output.npy".format(code_gen_dir)
out_shape = self.get_folded_output_shape()
rtlsim_output_to_npy(
output, out_npy_path, odt, out_shape, packed_bits, target_bits
)
# load and reshape output
output = np.load(out_npy_path)
oshape = self.get_normal_output_shape()
output = np.asarray([output], dtype=np.float32).reshape(*oshape)
context[node.output[0]] = output
else:
raise Exception(
"""Invalid value for attribute exec_mode! Is currently set to: {}
has to be set to one of the following value ("cppsim", "rtlsim")""".format(
mode
)
)
def get_number_output_values(self):
folded_oshape = self.get_folded_output_shape()
return np.prod(folded_oshape[:-1])
def global_includes(self):
pass
def defines(self, var):
pass
def read_npy_data(self):
pass
def strm_decl(self):
pass
def docompute(self):
pass
def dataoutstrm(self):
pass
def save_as_npy(self):
pass
def blackboxfunction(self):
pass
def pragmas(self):
pass
def code_generation_ipi(self):
impl_style = self.get_nodeattr("impl_style")
if impl_style == "rtl":
return super().code_generation_ipi()
elif impl_style == "vivado":
cmd = []
node_name = self.onnx_node.name
depth = self.get_nodeattr("depth")
ram_style = self.get_nodeattr("ram_style")
# create a hierarchy for this layer, with the same port names
clk_name = self.get_verilog_top_module_intf_names()["clk"][0]
rst_name = self.get_verilog_top_module_intf_names()["rst"][0]
dout_name = self.get_verilog_top_module_intf_names()["m_axis"][0][0]
din_name = self.get_verilog_top_module_intf_names()["s_axis"][0][0]
cmd.append("create_bd_cell -type hier %s" % node_name)
cmd.append("create_bd_pin -dir I -type clk /%s/%s" % (node_name, clk_name))
cmd.append("create_bd_pin -dir I -type rst /%s/%s" % (node_name, rst_name))
cmd.append(
"create_bd_intf_pin -mode Master "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s"
% (node_name, dout_name)
)
cmd.append(
"create_bd_intf_pin -mode Slave "
"-vlnv xilinx.com:interface:axis_rtl:1.0 /%s/%s" % (node_name, din_name)
)
# instantiate and configure DWC
cmd.append(
"create_bd_cell -type ip "
"-vlnv xilinx.com:ip:axis_data_fifo:2.0 /%s/fifo" % node_name
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_DEPTH {%d}] "
"[get_bd_cells /%s/fifo]" % (depth, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.FIFO_MEMORY_TYPE {%s}] "
"[get_bd_cells /%s/fifo]" % (ram_style, node_name)
)
cmd.append(
"set_property -dict [list CONFIG.TDATA_NUM_BYTES {%d}] "
"[get_bd_cells /%s/fifo]"
% (np.ceil(self.get_outstream_width() / 8), node_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/M_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, dout_name)
)
cmd.append(
"connect_bd_intf_net [get_bd_intf_pins %s/fifo/S_AXIS] "
"[get_bd_intf_pins %s/%s]" % (node_name, node_name, din_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aresetn]"
% (node_name, rst_name, node_name)
)
cmd.append(
"connect_bd_net [get_bd_pins %s/%s] "
"[get_bd_pins %s/fifo/s_axis_aclk]" % (node_name, clk_name, node_name)
)
return cmd
else:
raise Exception(
"FIFO implementation style %s not supported, please use rtl or vivado"
% impl_style
)
def bram_estimation(self):
"""Calculates resource estimation for BRAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "block"):
# Non-BRAM based implementation
return 0
if W == 1:
return math.ceil(depth / 16384)
elif W == 2:
return math.ceil(depth / 8192)
elif W <= 4:
return (math.ceil(depth / 4096)) * (math.ceil(W / 4))
elif W <= 9:
return (math.ceil(depth / 2048)) * (math.ceil(W / 9))
elif W <= 18 or depth > 512:
return (math.ceil(depth / 1024)) * (math.ceil(W / 18))
else:
return (math.ceil(depth / 512)) * (math.ceil(W / 36))
def uram_estimation(self):
"""Calculates resource estimation for URAM"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
if impl == "rtl" or (impl == "vivado" and ram_type != "ultra"):
# Non-URAM based implementation
return 0
else:
return (math.ceil(depth / 4096)) * (math.ceil(W / 72))
def bram_efficiency_estimation(self):
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
bram16_est = self.bram_estimation()
if bram16_est == 0:
return 1
wbits = W * depth
bram16_est_capacity = bram16_est * 36 * 512
return wbits / bram16_est_capacity
def lut_estimation(self):
"""Calculates resource estimations for LUTs"""
impl = self.get_nodeattr("impl_style")
ram_type = self.get_nodeattr("ram_style")
depth = self.get_nodeattr("depth")
W = self.get_instream_width()
address_luts = 2 * math.ceil(math.log(depth, 2))
if impl == "rtl" or (impl == "vivado" and ram_type == "distributed"):
ram_luts = (math.ceil(depth / 32)) * (math.ceil(W / 2))
else:
ram_luts = 0
return int(address_luts + ram_luts)
def prepare_rtlsim(self):
assert self.get_nodeattr("impl_style") != "vivado", (
"StreamingFIFO impl_style "
"cannot be vivado for rtlsim. Only impl_style=rtl supported."
)
super().prepare_rtlsim()
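if __name__ == "__main__":
    # Illustrative sketch (editorial addition, not part of the upstream FINN
    # source): mirrors the folded -> normal shape derivation documented in
    # get_normal_input_shape() above. For an assumed folded shape (1, nf, pe)
    # with nf = 49 and pe = 16, the two inner dimensions are multiplied,
    # giving a normal shape of (1, 784).
    folded_shape = [1, 49, 16]
    inner_dim = folded_shape[-1]
    folding_factor = folded_shape[-2] * inner_dim
    normal_ishape = folded_shape[:-2] + [folding_factor]
    print(folded_shape, "->", normal_ishape)  # [1, 49, 16] -> [1, 784]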
|
obswebsocket/requests.py | PanBartosz/obs-websocket-py | 123 | 3320 | <filename>obswebsocket/requests.py<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# THIS FILE WAS GENERATED BY generate_classes.py - DO NOT EDIT #
# (Generated on 2020-12-20 18:26:33.661372) #
from .base_classes import Baserequests
class GetVersion(Baserequests):
"""Returns the latest version of the plugin and the API.
:Returns:
*version*
type: double
OBSRemote compatible API version. Fixed to 1.1 for retrocompatibility.
*obs_websocket_version*
type: String
obs-websocket plugin version.
*obs_studio_version*
type: String
OBS Studio program version.
*available_requests*
type: String
List of available request types, formatted as a comma-separated list string (e.g. : "Method1,Method2,Method3").
*supported_image_export_formats*
type: String
List of supported formats for features that use image export (like the TakeSourceScreenshot request type) formatted as a comma-separated list string
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetVersion'
self.datain['version'] = None
self.datain['obs-websocket-version'] = None
self.datain['obs-studio-version'] = None
self.datain['available-requests'] = None
self.datain['supported-image-export-formats'] = None
def getVersion(self):
return self.datain['version']
def getObsWebsocketVersion(self):
return self.datain['obs-websocket-version']
def getObsStudioVersion(self):
return self.datain['obs-studio-version']
def getAvailableRequests(self):
return self.datain['available-requests']
def getSupportedImageExportFormats(self):
return self.datain['supported-image-export-formats']
class GetAuthRequired(Baserequests):
"""Tells the client if authentication is required. If so, returns authentication parameters `challenge`
and `salt` (see "Authentication" for more information).
:Returns:
*authRequired*
type: boolean
Indicates whether authentication is required.
*challenge*
type: String (optional)
*salt*
type: String (optional)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetAuthRequired'
self.datain['authRequired'] = None
self.datain['challenge'] = None
self.datain['salt'] = None
def getAuthRequired(self):
return self.datain['authRequired']
def getChallenge(self):
return self.datain['challenge']
def getSalt(self):
return self.datain['salt']
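def _example_auth_response(password, salt, challenge):
    # Illustrative helper (editorial addition, not part of the generated
    # obs-websocket-py request classes): sketches the obs-websocket 4.x
    # authentication handshake referred to by the GetAuthRequired and
    # Authenticate docstrings, i.e. secret = b64(sha256(password + salt)) and
    # auth = b64(sha256(secret + challenge)). In normal use the client
    # performs this for you when connecting; this only shows the arithmetic
    # behind the `auth` argument of Authenticate.
    import base64
    import hashlib
    secret = base64.b64encode(
        hashlib.sha256((password + salt).encode("utf-8")).digest())
    auth = base64.b64encode(
        hashlib.sha256(secret + challenge.encode("utf-8")).digest())
    return auth.decode("utf-8")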
class Authenticate(Baserequests):
"""Attempt to authenticate the client to the server.
:Arguments:
*auth*
type: String
Response to the auth challenge (see "Authentication" for more information).
"""
def __init__(self, auth):
Baserequests.__init__(self)
self.name = 'Authenticate'
self.dataout['auth'] = auth
class SetHeartbeat(Baserequests):
"""Enable/disable sending of the Heartbeat event
:Arguments:
*enable*
type: boolean
Starts/Stops emitting heartbeat messages
"""
def __init__(self, enable):
Baserequests.__init__(self)
self.name = 'SetHeartbeat'
self.dataout['enable'] = enable
class SetFilenameFormatting(Baserequests):
"""Set the filename formatting string
:Arguments:
*filename_formatting*
type: String
Filename formatting string to set.
"""
def __init__(self, filename_formatting):
Baserequests.__init__(self)
self.name = 'SetFilenameFormatting'
self.dataout['filename-formatting'] = filename_formatting
class GetFilenameFormatting(Baserequests):
"""Get the filename formatting string
:Returns:
*filename_formatting*
type: String
Current filename formatting string.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetFilenameFormatting'
self.datain['filename-formatting'] = None
def getFilenameFormatting(self):
return self.datain['filename-formatting']
class GetStats(Baserequests):
"""Get OBS stats (almost the same info as provided in OBS' stats window)
:Returns:
*stats*
type: OBSStats
[OBS stats](#obsstats)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStats'
self.datain['stats'] = None
def getStats(self):
return self.datain['stats']
class BroadcastCustomMessage(Baserequests):
"""Broadcast custom message to all connected WebSocket clients
:Arguments:
*realm*
type: String
Identifier to be choosen by the client
*data*
type: Object
User-defined data
"""
def __init__(self, realm, data):
Baserequests.__init__(self)
self.name = 'BroadcastCustomMessage'
self.dataout['realm'] = realm
self.dataout['data'] = data
class GetVideoInfo(Baserequests):
"""Get basic OBS video information
:Returns:
*baseWidth*
type: int
Base (canvas) width
*baseHeight*
type: int
Base (canvas) height
*outputWidth*
type: int
Output width
*outputHeight*
type: int
Output height
*scaleType*
type: String
Scaling method used if output size differs from base size
*fps*
type: double
Frames rendered per second
*videoFormat*
type: String
Video color format
*colorSpace*
type: String
Color space for YUV
*colorRange*
type: String
Color range (full or partial)
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetVideoInfo'
self.datain['baseWidth'] = None
self.datain['baseHeight'] = None
self.datain['outputWidth'] = None
self.datain['outputHeight'] = None
self.datain['scaleType'] = None
self.datain['fps'] = None
self.datain['videoFormat'] = None
self.datain['colorSpace'] = None
self.datain['colorRange'] = None
def getBaseWidth(self):
return self.datain['baseWidth']
def getBaseHeight(self):
return self.datain['baseHeight']
def getOutputWidth(self):
return self.datain['outputWidth']
def getOutputHeight(self):
return self.datain['outputHeight']
def getScaleType(self):
return self.datain['scaleType']
def getFps(self):
return self.datain['fps']
def getVideoFormat(self):
return self.datain['videoFormat']
def getColorSpace(self):
return self.datain['colorSpace']
def getColorRange(self):
return self.datain['colorRange']
class OpenProjector(Baserequests):
"""Open a projector window or create a projector on a monitor. Requires OBS v24.0.4 or newer.
:Arguments:
*type*
type: String (Optional)
Type of projector: `Preview` (default), `Source`, `Scene`, `StudioProgram`, or `Multiview` (case insensitive).
*monitor*
type: int (Optional)
Monitor to open the projector on. If -1 or omitted, opens a window.
*geometry*
type: String (Optional)
Size and position of the projector window (only if monitor is -1). Encoded in Base64 using [Qt's geometry encoding](https://doc.qt.io/qt-5/qwidget.html#saveGeometry). Corresponds to OBS's saved projectors.
*name*
type: String (Optional)
Name of the source or scene to be displayed (ignored for other projector types).
"""
def __init__(self, type, monitor, geometry, name):
Baserequests.__init__(self)
self.name = 'OpenProjector'
self.dataout['type'] = type
self.dataout['monitor'] = monitor
self.dataout['geometry'] = geometry
self.dataout['name'] = name
class TriggerHotkeyByName(Baserequests):
"""Executes hotkey routine, identified by hotkey unique name
:Arguments:
*hotkeyName*
type: String
Unique name of the hotkey, as defined when registering the hotkey (e.g. "ReplayBuffer.Save")
"""
def __init__(self, hotkeyName):
Baserequests.__init__(self)
self.name = 'TriggerHotkeyByName'
self.dataout['hotkeyName'] = hotkeyName
class TriggerHotkeyBySequence(Baserequests):
"""Executes hotkey routine, identified by bound combination of keys. A single key combination might trigger multiple hotkey routines depending on user settings
:Arguments:
*keyId*
type: String
Main key identifier (e.g. `OBS_KEY_A` for key "A"). Available identifiers [here](https://github.com/obsproject/obs-studio/blob/master/libobs/obs-hotkeys.h)
*keyModifiers*
type: Object (Optional)
Optional key modifiers object. False entries can be omitted
*keyModifiers.shift*
type: boolean
Trigger Shift Key
*keyModifiers.alt*
type: boolean
Trigger Alt Key
*keyModifiers.control*
type: boolean
Trigger Control (Ctrl) Key
*keyModifiers.command*
type: boolean
Trigger Command Key (Mac)
"""
def __init__(self, keyId, keyModifiers):
Baserequests.__init__(self)
self.name = 'TriggerHotkeyBySequence'
self.dataout['keyId'] = keyId
self.dataout['keyModifiers'] = keyModifiers
class PlayPauseMedia(Baserequests):
"""Pause or play a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
*playPause*
type: boolean
Whether to pause or play the source. `false` for play, `true` for pause.
"""
def __init__(self, sourceName, playPause):
Baserequests.__init__(self)
self.name = 'PlayPauseMedia'
self.dataout['sourceName'] = sourceName
self.dataout['playPause'] = playPause
class RestartMedia(Baserequests):
"""Restart a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'RestartMedia'
self.dataout['sourceName'] = sourceName
class StopMedia(Baserequests):
"""Stop a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'StopMedia'
self.dataout['sourceName'] = sourceName
class NextMedia(Baserequests):
"""Skip to the next media item in the playlist. Supports only vlc media source (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'NextMedia'
self.dataout['sourceName'] = sourceName
class PreviousMedia(Baserequests):
"""Go to the previous media item in the playlist. Supports only vlc media source (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'PreviousMedia'
self.dataout['sourceName'] = sourceName
class GetMediaDuration(Baserequests):
"""Get the length of media in milliseconds. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
Note: For some reason, for the first 5 or so seconds that the media is playing, the total duration can be off by upwards of 50ms.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*mediaDuration*
type: int
The total length of media in milliseconds..
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaDuration'
self.datain['mediaDuration'] = None
self.dataout['sourceName'] = sourceName
def getMediaDuration(self):
return self.datain['mediaDuration']
class GetMediaTime(Baserequests):
"""Get the current timestamp of media in milliseconds. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*timestamp*
type: int
The time in milliseconds since the start of the media.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaTime'
self.datain['timestamp'] = None
self.dataout['sourceName'] = sourceName
def getTimestamp(self):
return self.datain['timestamp']
class SetMediaTime(Baserequests):
"""Set the timestamp of a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
*timestamp*
type: int
Milliseconds to set the timestamp to.
"""
def __init__(self, sourceName, timestamp):
Baserequests.__init__(self)
self.name = 'SetMediaTime'
self.dataout['sourceName'] = sourceName
self.dataout['timestamp'] = timestamp
class ScrubMedia(Baserequests):
"""Scrub media using a supplied offset. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
Note: Due to processing/network delays, this request is not perfect. The processing rate of this request has also not been tested.
:Arguments:
*sourceName*
type: String
Source name.
*timeOffset*
type: int
Millisecond offset (positive or negative) to offset the current media position.
"""
def __init__(self, sourceName, timeOffset):
Baserequests.__init__(self)
self.name = 'ScrubMedia'
self.dataout['sourceName'] = sourceName
self.dataout['timeOffset'] = timeOffset
class GetMediaState(Baserequests):
"""Get the current playing state of a media source. Supports ffmpeg and vlc media sources (as of OBS v25.0.8)
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*mediaState*
type: String
The media state of the provided source. States: `none`, `playing`, `opening`, `buffering`, `paused`, `stopped`, `ended`, `error`, `unknown`
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetMediaState'
self.datain['mediaState'] = None
self.dataout['sourceName'] = sourceName
def getMediaState(self):
return self.datain['mediaState']
class GetMediaSourcesList(Baserequests):
"""List the media state of all media sources (vlc and media source)
:Returns:
*mediaSources*
type: Array<Object>
Array of sources
*mediaSources.*.sourceName*
type: String
Unique source name
*mediaSources.*.sourceKind*
type: String
Unique source internal type (a.k.a `ffmpeg_source` or `vlc_source`)
*mediaSources.*.mediaState*
type: String
The current state of media for that source. States: `none`, `playing`, `opening`, `buffering`, `paused`, `stopped`, `ended`, `error`, `unknown`
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetMediaSourcesList'
self.datain['mediaSources'] = None
def getMediaSources(self):
return self.datain['mediaSources']
class CreateSource(Baserequests):
"""Create a source and add it as a sceneitem to a scene.
:Arguments:
*sourceName*
type: String
Source name.
*sourceKind*
type: String
Source kind, Eg. `vlc_source`.
*sceneName*
type: String
Scene to add the new source to.
*sourceSettings*
type: Object (optional)
Source settings data.
*setVisible*
type: boolean (optional)
Set the created SceneItem as visible or not. Defaults to true
:Returns:
*itemId*
type: int
ID of the SceneItem in the scene.
"""
def __init__(self, sourceName, sourceKind, sceneName, sourceSettings=None, setVisible=None):
Baserequests.__init__(self)
self.name = 'CreateSource'
self.datain['itemId'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceKind'] = sourceKind
self.dataout['sceneName'] = sceneName
self.dataout['sourceSettings'] = sourceSettings
self.dataout['setVisible'] = setVisible
def getItemId(self):
return self.datain['itemId']
class GetSourcesList(Baserequests):
"""List all sources available in the running OBS instance
:Returns:
*sources*
type: Array<Object>
Array of sources
*sources.*.name*
type: String
Unique source name
*sources.*.typeId*
type: String
Non-unique source internal type (a.k.a kind)
*sources.*.type*
type: String
Source type. Value is one of the following: "input", "filter", "transition", "scene" or "unknown"
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSourcesList'
self.datain['sources'] = None
def getSources(self):
return self.datain['sources']
class GetSourceTypesList(Baserequests):
"""Get a list of all available sources types
:Returns:
*types*
type: Array<Object>
Array of source types
*types.*.typeId*
type: String
Non-unique internal source type ID
*types.*.displayName*
type: String
Display name of the source type
*types.*.type*
type: String
Type. Value is one of the following: "input", "filter", "transition" or "other"
*types.*.defaultSettings*
type: Object
Default settings of this source type
*types.*.caps*
type: Object
Source type capabilities
*types.*.caps.isAsync*
type: Boolean
True if source of this type provide frames asynchronously
*types.*.caps.hasVideo*
type: Boolean
True if sources of this type provide video
*types.*.caps.hasAudio*
type: Boolean
True if sources of this type provide audio
*types.*.caps.canInteract*
type: Boolean
True if interaction with this sources of this type is possible
*types.*.caps.isComposite*
type: Boolean
True if sources of this type composite one or more sub-sources
*types.*.caps.doNotDuplicate*
type: Boolean
True if sources of this type should not be fully duplicated
*types.*.caps.doNotSelfMonitor*
type: Boolean
True if sources of this type may cause a feedback loop if its audio is monitored and shouldn't be
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSourceTypesList'
self.datain['types'] = None
def getTypes(self):
return self.datain['types']
class GetVolume(Baserequests):
"""Get the volume of the specified source. Default response uses mul format, NOT SLIDER PERCENTAGE.
:Arguments:
*source*
type: String
Source name.
*useDecibel*
type: boolean (optional)
Output volume in decibels of attenuation instead of amplitude/mul.
:Returns:
*name*
type: String
Source name.
*volume*
type: double
Volume of the source. Between `0.0` and `20.0` if using mul, under `26.0` if using dB.
*muted*
type: boolean
Indicates whether the source is muted.
"""
def __init__(self, source, useDecibel=None):
Baserequests.__init__(self)
self.name = 'GetVolume'
self.datain['name'] = None
self.datain['volume'] = None
self.datain['muted'] = None
self.dataout['source'] = source
self.dataout['useDecibel'] = useDecibel
def getName(self):
return self.datain['name']
def getVolume(self):
return self.datain['volume']
def getMuted(self):
return self.datain['muted']
class SetVolume(Baserequests):
"""Set the volume of the specified source. Default request format uses mul, NOT SLIDER PERCENTAGE.
:Arguments:
*source*
type: String
Source name.
*volume*
type: double
Desired volume. Must be between `0.0` and `20.0` for mul, and under 26.0 for dB. OBS will interpret dB values under -100.0 as Inf. Note: The OBS volume sliders only reach a maximum of 1.0mul/0.0dB, however OBS actually supports larger values.
*useDecibel*
type: boolean (optional)
Interpret `volume` data as decibels instead of amplitude/mul.
"""
def __init__(self, source, volume, useDecibel=None):
Baserequests.__init__(self)
self.name = 'SetVolume'
self.dataout['source'] = source
self.dataout['volume'] = volume
self.dataout['useDecibel'] = useDecibel
class GetMute(Baserequests):
"""Get the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*name*
type: String
Source name.
*muted*
type: boolean
Mute status of the source.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetMute'
self.datain['name'] = None
self.datain['muted'] = None
self.dataout['source'] = source
def getName(self):
return self.datain['name']
def getMuted(self):
return self.datain['muted']
class SetMute(Baserequests):
"""Sets the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
*mute*
type: boolean
Desired mute status.
"""
def __init__(self, source, mute):
Baserequests.__init__(self)
self.name = 'SetMute'
self.dataout['source'] = source
self.dataout['mute'] = mute
class ToggleMute(Baserequests):
"""Inverts the mute status of a specified source.
:Arguments:
*source*
type: String
Source name.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'ToggleMute'
self.dataout['source'] = source
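# Usage sketch: mute handling. `ws` is the same assumed, already-connected
# client with a `call(request)` method; only the request classes are defined
# in this module.
#
#     status = GetMute("Desktop Audio")
#     ws.call(status)
#     if not status.getMuted():
#         ws.call(SetMute("Desktop Audio", True))    # explicit mute
#     # or simply flip the current state:
#     ws.call(ToggleMute("Desktop Audio"))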
class GetAudioActive(Baserequests):
"""Get the audio's active status of a specified source.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*audioActive*
type: boolean
Audio active status of the source.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetAudioActive'
self.datain['audioActive'] = None
self.dataout['sourceName'] = sourceName
def getAudioActive(self):
return self.datain['audioActive']
class SetSourceName(Baserequests):
"""
Note: If the new name already exists as a source, obs-websocket will return an error.
:Arguments:
*sourceName*
type: String
Source name.
*newName*
type: String
New source name.
"""
def __init__(self, sourceName, newName):
Baserequests.__init__(self)
self.name = 'SetSourceName'
self.dataout['sourceName'] = sourceName
self.dataout['newName'] = newName
class SetSyncOffset(Baserequests):
"""Set the audio sync offset of a specified source.
:Arguments:
*source*
type: String
Source name.
*offset*
type: int
The desired audio sync offset (in nanoseconds).
"""
def __init__(self, source, offset):
Baserequests.__init__(self)
self.name = 'SetSyncOffset'
self.dataout['source'] = source
self.dataout['offset'] = offset
class GetSyncOffset(Baserequests):
"""Get the audio sync offset of a specified source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*name*
type: String
Source name.
*offset*
type: int
The audio sync offset (in nanoseconds).
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetSyncOffset'
self.datain['name'] = None
self.datain['offset'] = None
self.dataout['source'] = source
def getName(self):
return self.datain['name']
def getOffset(self):
return self.datain['offset']
class GetSourceSettings(Baserequests):
"""Get settings of the specified source
:Arguments:
*sourceName*
type: String
Source name.
*sourceType*
type: String (optional)
Type of the specified source. Useful for type-checking if you expect a specific settings schema.
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Type of the specified source
*sourceSettings*
type: Object
Source settings (varies between source types, may require some probing around).
"""
def __init__(self, sourceName, sourceType=None):
Baserequests.__init__(self)
self.name = 'GetSourceSettings'
self.datain['sourceName'] = None
self.datain['sourceType'] = None
self.datain['sourceSettings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceType'] = sourceType
def getSourceName(self):
return self.datain['sourceName']
def getSourceType(self):
return self.datain['sourceType']
def getSourceSettings(self):
return self.datain['sourceSettings']
class SetSourceSettings(Baserequests):
"""Set settings of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
*sourceType*
type: String (optional)
Type of the specified source. Useful for type-checking to avoid settings a set of settings incompatible with the actual source's type.
*sourceSettings*
type: Object
Source settings (varies between source types, may require some probing around).
:Returns:
*sourceName*
type: String
Source name
*sourceType*
type: String
Type of the specified source
*sourceSettings*
type: Object
Updated source settings
"""
def __init__(self, sourceName, sourceSettings, sourceType=None):
Baserequests.__init__(self)
self.name = 'SetSourceSettings'
self.datain['sourceName'] = None
self.datain['sourceType'] = None
self.datain['sourceSettings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['sourceSettings'] = sourceSettings
self.dataout['sourceType'] = sourceType
def getSourceName(self):
return self.datain['sourceName']
def getSourceType(self):
return self.datain['sourceType']
def getSourceSettings(self):
return self.datain['sourceSettings']
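# Usage sketch: a read-modify-write round trip on arbitrary source settings.
# `ws` is an assumed connected client with a `call(request)` method; the
# settings keys vary per source type, so probing with GetSourceSettings
# first is the safe pattern. The source name and key below are illustrative.
#
#     req = GetSourceSettings("My VLC Source")
#     ws.call(req)
#     settings = dict(req.getSourceSettings() or {})
#     settings["loop"] = True                        # key name depends on the source type
#     ws.call(SetSourceSettings("My VLC Source", settings))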
class GetTextGDIPlusProperties(Baserequests):
"""Get the current properties of a Text GDI Plus source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name.
*align*
type: String
Text Alignment ("left", "center", "right").
*bk_color*
type: int
Background color.
*bk_opacity*
type: int
Background opacity (0-100).
*chatlog*
type: boolean
Chat log.
*chatlog_lines*
type: int
Chat log lines.
*color*
type: int
Text color.
*extents*
type: boolean
Extents wrap.
*extents_cx*
type: int
Extents cx.
*extents_cy*
type: int
Extents cy.
*file*
type: String
File path name.
*read_from_file*
type: boolean
Read text from the specified file.
*font*
type: Object
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String
Font face.
*font.flags*
type: int
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int
Font text size.
*font.style*
type: String
Font Style (unknown function).
*gradient*
type: boolean
Gradient enabled.
*gradient_color*
type: int
Gradient color.
*gradient_dir*
type: float
Gradient direction.
*gradient_opacity*
type: int
Gradient opacity (0-100).
*outline*
type: boolean
Outline.
*outline_color*
type: int
Outline color.
*outline_size*
type: int
Outline size.
*outline_opacity*
type: int
Outline opacity (0-100).
*text*
type: String
Text content to be displayed.
*valign*
type: String
Text vertical alignment ("top", "center", "bottom").
*vertical*
type: boolean
Vertical text enabled.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetTextGDIPlusProperties'
self.datain['source'] = None
self.datain['align'] = None
self.datain['bk_color'] = None
self.datain['bk_opacity'] = None
self.datain['chatlog'] = None
self.datain['chatlog_lines'] = None
self.datain['color'] = None
self.datain['extents'] = None
self.datain['extents_cx'] = None
self.datain['extents_cy'] = None
self.datain['file'] = None
self.datain['read_from_file'] = None
self.datain['font'] = None
self.datain['gradient'] = None
self.datain['gradient_color'] = None
self.datain['gradient_dir'] = None
self.datain['gradient_opacity'] = None
self.datain['outline'] = None
self.datain['outline_color'] = None
self.datain['outline_size'] = None
self.datain['outline_opacity'] = None
self.datain['text'] = None
self.datain['valign'] = None
self.datain['vertical'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getAlign(self):
return self.datain['align']
def getBk_color(self):
return self.datain['bk_color']
def getBk_opacity(self):
return self.datain['bk_opacity']
def getChatlog(self):
return self.datain['chatlog']
def getChatlog_lines(self):
return self.datain['chatlog_lines']
def getColor(self):
return self.datain['color']
def getExtents(self):
return self.datain['extents']
def getExtents_cx(self):
return self.datain['extents_cx']
def getExtents_cy(self):
return self.datain['extents_cy']
def getFile(self):
return self.datain['file']
def getRead_from_file(self):
return self.datain['read_from_file']
def getFont(self):
return self.datain['font']
def getGradient(self):
return self.datain['gradient']
def getGradient_color(self):
return self.datain['gradient_color']
def getGradient_dir(self):
return self.datain['gradient_dir']
def getGradient_opacity(self):
return self.datain['gradient_opacity']
def getOutline(self):
return self.datain['outline']
def getOutline_color(self):
return self.datain['outline_color']
def getOutline_size(self):
return self.datain['outline_size']
def getOutline_opacity(self):
return self.datain['outline_opacity']
def getText(self):
return self.datain['text']
def getValign(self):
return self.datain['valign']
def getVertical(self):
return self.datain['vertical']
class SetTextGDIPlusProperties(Baserequests):
"""Set the current properties of a Text GDI Plus source.
:Arguments:
*source*
type: String
Name of the source.
*align*
type: String (optional)
Text Alignment ("left", "center", "right").
*bk_color*
type: int (optional)
Background color.
*bk_opacity*
type: int (optional)
Background opacity (0-100).
*chatlog*
type: boolean (optional)
Chat log.
*chatlog_lines*
type: int (optional)
Chat log lines.
*color*
type: int (optional)
Text color.
*extents*
type: boolean (optional)
Extents wrap.
*extents_cx*
type: int (optional)
Extents cx.
*extents_cy*
type: int (optional)
Extents cy.
*file*
type: String (optional)
File path name.
*read_from_file*
type: boolean (optional)
Read text from the specified file.
*font*
type: Object (optional)
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String (optional)
Font face.
*font.flags*
type: int (optional)
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int (optional)
Font text size.
*font.style*
type: String (optional)
Font Style (unknown function).
*gradient*
type: boolean (optional)
Gradient enabled.
*gradient_color*
type: int (optional)
Gradient color.
*gradient_dir*
type: float (optional)
Gradient direction.
*gradient_opacity*
type: int (optional)
Gradient opacity (0-100).
*outline*
type: boolean (optional)
Outline.
*outline_color*
type: int (optional)
Outline color.
*outline_size*
type: int (optional)
Outline size.
*outline_opacity*
type: int (optional)
Outline opacity (0-100).
*text*
type: String (optional)
Text content to be displayed.
*valign*
type: String (optional)
Text vertical alignment ("top", "center", "bottom").
*vertical*
type: boolean (optional)
Vertical text enabled.
*render*
type: boolean (optional)
Visibility of the scene item.
"""
def __init__(self, source, align=None, bk_color=None, bk_opacity=None, chatlog=None, chatlog_lines=None, color=None, extents=None, extents_cx=None, extents_cy=None, file=None, read_from_file=None, font=None, gradient=None, gradient_color=None, gradient_dir=None, gradient_opacity=None, outline=None, outline_color=None, outline_size=None, outline_opacity=None, text=None, valign=None, vertical=None, render=None):
Baserequests.__init__(self)
self.name = 'SetTextGDIPlusProperties'
self.dataout['source'] = source
self.dataout['align'] = align
self.dataout['bk_color'] = bk_color
self.dataout['bk_opacity'] = bk_opacity
self.dataout['chatlog'] = chatlog
self.dataout['chatlog_lines'] = chatlog_lines
self.dataout['color'] = color
self.dataout['extents'] = extents
self.dataout['extents_cx'] = extents_cx
self.dataout['extents_cy'] = extents_cy
self.dataout['file'] = file
self.dataout['read_from_file'] = read_from_file
self.dataout['font'] = font
self.dataout['gradient'] = gradient
self.dataout['gradient_color'] = gradient_color
self.dataout['gradient_dir'] = gradient_dir
self.dataout['gradient_opacity'] = gradient_opacity
self.dataout['outline'] = outline
self.dataout['outline_color'] = outline_color
self.dataout['outline_size'] = outline_size
self.dataout['outline_opacity'] = outline_opacity
self.dataout['text'] = text
self.dataout['valign'] = valign
self.dataout['vertical'] = vertical
self.dataout['render'] = render
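# Usage sketch: updating a Text (GDI+) source. Only the fields passed are
# changed; the `font` argument is a dict matching the shape documented in
# the docstring above. `ws` is the assumed connected client and the source
# name is illustrative.
#
#     ws.call(SetTextGDIPlusProperties(
#         "Scoreboard",
#         text="Round 2",
#         color=0xFFFFFF,
#         font={"face": "Arial", "flags": 1, "size": 96, "style": ""},   # flags=1 -> bold
#     ))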
class GetTextFreetype2Properties(Baserequests):
"""Get the current properties of a Text Freetype 2 source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name
*color1*
type: int
Gradient top color.
*color2*
type: int
Gradient bottom color.
*custom_width*
type: int
Custom width (0 to disable).
*drop_shadow*
type: boolean
Drop shadow.
*font*
type: Object
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String
Font face.
*font.flags*
type: int
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int
Font text size.
*font.style*
type: String
Font Style (unknown function).
*from_file*
type: boolean
Read text from the specified file.
*log_mode*
type: boolean
Chat log.
*outline*
type: boolean
Outline.
*text*
type: String
Text content to be displayed.
*text_file*
type: String
File path.
*word_wrap*
type: boolean
Word wrap.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetTextFreetype2Properties'
self.datain['source'] = None
self.datain['color1'] = None
self.datain['color2'] = None
self.datain['custom_width'] = None
self.datain['drop_shadow'] = None
self.datain['font'] = None
self.datain['from_file'] = None
self.datain['log_mode'] = None
self.datain['outline'] = None
self.datain['text'] = None
self.datain['text_file'] = None
self.datain['word_wrap'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getColor1(self):
return self.datain['color1']
def getColor2(self):
return self.datain['color2']
def getCustom_width(self):
return self.datain['custom_width']
def getDrop_shadow(self):
return self.datain['drop_shadow']
def getFont(self):
return self.datain['font']
def getFrom_file(self):
return self.datain['from_file']
def getLog_mode(self):
return self.datain['log_mode']
def getOutline(self):
return self.datain['outline']
def getText(self):
return self.datain['text']
def getText_file(self):
return self.datain['text_file']
def getWord_wrap(self):
return self.datain['word_wrap']
class SetTextFreetype2Properties(Baserequests):
"""Set the current properties of a Text Freetype 2 source.
:Arguments:
*source*
type: String
Source name.
*color1*
type: int (optional)
Gradient top color.
*color2*
type: int (optional)
Gradient bottom color.
*custom_width*
type: int (optional)
Custom width (0 to disable).
*drop_shadow*
type: boolean (optional)
Drop shadow.
*font*
type: Object (optional)
Holds data for the font. Ex: `"font": { "face": "Arial", "flags": 0, "size": 150, "style": "" }`
*font.face*
type: String (optional)
Font face.
*font.flags*
type: int (optional)
Font text styling flag. `Bold=1, Italic=2, Bold Italic=3, Underline=5, Strikeout=8`
*font.size*
type: int (optional)
Font text size.
*font.style*
type: String (optional)
Font Style (unknown function).
*from_file*
type: boolean (optional)
Read text from the specified file.
*log_mode*
type: boolean (optional)
Chat log.
*outline*
type: boolean (optional)
Outline.
*text*
type: String (optional)
Text content to be displayed.
*text_file*
type: String (optional)
File path.
*word_wrap*
type: boolean (optional)
Word wrap.
"""
def __init__(self, source, color1=None, color2=None, custom_width=None, drop_shadow=None, font=None, from_file=None, log_mode=None, outline=None, text=None, text_file=None, word_wrap=None):
Baserequests.__init__(self)
self.name = 'SetTextFreetype2Properties'
self.dataout['source'] = source
self.dataout['color1'] = color1
self.dataout['color2'] = color2
self.dataout['custom_width'] = custom_width
self.dataout['drop_shadow'] = drop_shadow
self.dataout['font'] = font
self.dataout['from_file'] = from_file
self.dataout['log_mode'] = log_mode
self.dataout['outline'] = outline
self.dataout['text'] = text
self.dataout['text_file'] = text_file
self.dataout['word_wrap'] = word_wrap
class GetBrowserSourceProperties(Baserequests):
"""Get current properties for a Browser Source.
:Arguments:
*source*
type: String
Source name.
:Returns:
*source*
type: String
Source name.
*is_local_file*
type: boolean
Indicates that a local file is in use.
*local_file*
type: String
            File path.
*url*
type: String
Url.
*css*
type: String
CSS to inject.
*width*
type: int
Width.
*height*
type: int
Height.
*fps*
type: int
Framerate.
*shutdown*
type: boolean
Indicates whether the source should be shutdown when not visible.
"""
def __init__(self, source):
Baserequests.__init__(self)
self.name = 'GetBrowserSourceProperties'
self.datain['source'] = None
self.datain['is_local_file'] = None
self.datain['local_file'] = None
self.datain['url'] = None
self.datain['css'] = None
self.datain['width'] = None
self.datain['height'] = None
self.datain['fps'] = None
self.datain['shutdown'] = None
self.dataout['source'] = source
def getSource(self):
return self.datain['source']
def getIs_local_file(self):
return self.datain['is_local_file']
def getLocal_file(self):
return self.datain['local_file']
def getUrl(self):
return self.datain['url']
def getCss(self):
return self.datain['css']
def getWidth(self):
return self.datain['width']
def getHeight(self):
return self.datain['height']
def getFps(self):
return self.datain['fps']
def getShutdown(self):
return self.datain['shutdown']
class SetBrowserSourceProperties(Baserequests):
"""Set current properties for a Browser Source.
:Arguments:
*source*
type: String
Name of the source.
*is_local_file*
type: boolean (optional)
Indicates that a local file is in use.
*local_file*
type: String (optional)
            File path.
*url*
type: String (optional)
Url.
*css*
type: String (optional)
CSS to inject.
*width*
type: int (optional)
Width.
*height*
type: int (optional)
Height.
*fps*
type: int (optional)
Framerate.
*shutdown*
type: boolean (optional)
Indicates whether the source should be shutdown when not visible.
*render*
type: boolean (optional)
Visibility of the scene item.
"""
def __init__(self, source, is_local_file=None, local_file=None, url=None, css=None, width=None, height=None, fps=None, shutdown=None, render=None):
Baserequests.__init__(self)
self.name = 'SetBrowserSourceProperties'
self.dataout['source'] = source
self.dataout['is_local_file'] = is_local_file
self.dataout['local_file'] = local_file
self.dataout['url'] = url
self.dataout['css'] = css
self.dataout['width'] = width
self.dataout['height'] = height
self.dataout['fps'] = fps
self.dataout['shutdown'] = shutdown
self.dataout['render'] = render
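# Usage sketch: pointing a Browser Source at a new URL and size. `ws` is the
# assumed connected client; unspecified properties keep their current values
# and the URL below is a placeholder.
#
#     ws.call(SetBrowserSourceProperties(
#         "Overlay",
#         url="https://example.com/overlay",
#         width=1920,
#         height=1080,
#         shutdown=True,      # unload the page while the source is hidden
#     ))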
class GetSpecialSources(Baserequests):
"""Get configured special sources like Desktop Audio and Mic/Aux sources.
:Returns:
*desktop_1*
type: String (optional)
Name of the first Desktop Audio capture source.
*desktop_2*
type: String (optional)
Name of the second Desktop Audio capture source.
*mic_1*
type: String (optional)
Name of the first Mic/Aux input source.
*mic_2*
type: String (optional)
Name of the second Mic/Aux input source.
*mic_3*
type: String (optional)
            Name of the third Mic/Aux input source.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSpecialSources'
self.datain['desktop-1'] = None
self.datain['desktop-2'] = None
self.datain['mic-1'] = None
self.datain['mic-2'] = None
self.datain['mic-3'] = None
def getDesktop1(self):
return self.datain['desktop-1']
def getDesktop2(self):
return self.datain['desktop-2']
def getMic1(self):
return self.datain['mic-1']
def getMic2(self):
return self.datain['mic-2']
def getMic3(self):
return self.datain['mic-3']
class GetSourceFilters(Baserequests):
"""List filters applied to a source
:Arguments:
*sourceName*
type: String
Source name
:Returns:
*filters*
type: Array<Object>
List of filters for the specified source
*filters.*.enabled*
type: Boolean
Filter status (enabled or not)
*filters.*.type*
type: String
Filter type
*filters.*.name*
type: String
Filter name
*filters.*.settings*
type: Object
Filter settings
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetSourceFilters'
self.datain['filters'] = None
self.dataout['sourceName'] = sourceName
def getFilters(self):
return self.datain['filters']
class GetSourceFilterInfo(Baserequests):
"""List filters applied to a source
:Arguments:
*sourceName*
type: String
Source name
*filterName*
type: String
Source filter name
:Returns:
*enabled*
type: Boolean
Filter status (enabled or not)
*type*
type: String
Filter type
*name*
type: String
Filter name
*settings*
type: Object
Filter settings
"""
def __init__(self, sourceName, filterName):
Baserequests.__init__(self)
self.name = 'GetSourceFilterInfo'
self.datain['enabled'] = None
self.datain['type'] = None
self.datain['name'] = None
self.datain['settings'] = None
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
def getEnabled(self):
return self.datain['enabled']
def getType(self):
return self.datain['type']
def getName(self):
return self.datain['name']
def getSettings(self):
return self.datain['settings']
class AddFilterToSource(Baserequests):
"""Add a new filter to a source. Available source types along with their settings properties are available from `GetSourceTypesList`.
:Arguments:
*sourceName*
type: String
Name of the source on which the filter is added
*filterName*
type: String
Name of the new filter
*filterType*
type: String
Filter type
*filterSettings*
type: Object
Filter settings
"""
def __init__(self, sourceName, filterName, filterType, filterSettings):
Baserequests.__init__(self)
self.name = 'AddFilterToSource'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterType'] = filterType
self.dataout['filterSettings'] = filterSettings
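# Usage sketch: adding a filter to a source. Valid `filterType` identifiers
# and their settings schemas can be discovered via GetSourceTypesList (the
# entries whose `type` is "filter"). `ws` is the assumed connected client;
# the filter type id and settings payload below are illustrative values.
#
#     ws.call(AddFilterToSource(
#         "Webcam",
#         "My Color Filter",
#         "color_filter",                  # example filter type id
#         {"brightness": 0.1},             # example settings payload
#     ))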
class RemoveFilterFromSource(Baserequests):
"""Remove a filter from a source
:Arguments:
*sourceName*
type: String
Name of the source from which the specified filter is removed
*filterName*
type: String
Name of the filter to remove
"""
def __init__(self, sourceName, filterName):
Baserequests.__init__(self)
self.name = 'RemoveFilterFromSource'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
class ReorderSourceFilter(Baserequests):
"""Move a filter in the chain (absolute index positioning)
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reorder
*newIndex*
type: Integer
Desired position of the filter in the chain
"""
def __init__(self, sourceName, filterName, newIndex):
Baserequests.__init__(self)
self.name = 'ReorderSourceFilter'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['newIndex'] = newIndex
class MoveSourceFilter(Baserequests):
"""Move a filter in the chain (relative positioning)
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reorder
*movementType*
type: String
How to move the filter around in the source's filter chain. Either "up", "down", "top" or "bottom".
"""
def __init__(self, sourceName, filterName, movementType):
Baserequests.__init__(self)
self.name = 'MoveSourceFilter'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['movementType'] = movementType
class SetSourceFilterSettings(Baserequests):
"""Update settings of a filter
:Arguments:
*sourceName*
type: String
Name of the source to which the filter belongs
*filterName*
type: String
Name of the filter to reconfigure
*filterSettings*
type: Object
New settings. These will be merged to the current filter settings.
"""
def __init__(self, sourceName, filterName, filterSettings):
Baserequests.__init__(self)
self.name = 'SetSourceFilterSettings'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterSettings'] = filterSettings
class SetSourceFilterVisibility(Baserequests):
"""Change the visibility/enabled state of a filter
:Arguments:
*sourceName*
type: String
Source name
*filterName*
type: String
Source filter name
*filterEnabled*
type: Boolean
New filter state
"""
def __init__(self, sourceName, filterName, filterEnabled):
Baserequests.__init__(self)
self.name = 'SetSourceFilterVisibility'
self.dataout['sourceName'] = sourceName
self.dataout['filterName'] = filterName
self.dataout['filterEnabled'] = filterEnabled
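# Usage sketch: toggling a filter's enabled state by reading it first with
# GetSourceFilterInfo and writing the inverse back with
# SetSourceFilterVisibility. `ws` is the assumed connected client.
#
#     info = GetSourceFilterInfo("Webcam", "My Color Filter")
#     ws.call(info)
#     ws.call(SetSourceFilterVisibility("Webcam", "My Color Filter",
#                                       not info.getEnabled()))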
class GetAudioMonitorType(Baserequests):
"""Get the audio monitoring type of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
:Returns:
*monitorType*
type: String
The monitor type in use. Options: `none`, `monitorOnly`, `monitorAndOutput`.
"""
def __init__(self, sourceName):
Baserequests.__init__(self)
self.name = 'GetAudioMonitorType'
self.datain['monitorType'] = None
self.dataout['sourceName'] = sourceName
def getMonitorType(self):
return self.datain['monitorType']
class SetAudioMonitorType(Baserequests):
"""Set the audio monitoring type of the specified source.
:Arguments:
*sourceName*
type: String
Source name.
*monitorType*
type: String
The monitor type to use. Options: `none`, `monitorOnly`, `monitorAndOutput`.
"""
def __init__(self, sourceName, monitorType):
Baserequests.__init__(self)
self.name = 'SetAudioMonitorType'
self.dataout['sourceName'] = sourceName
self.dataout['monitorType'] = monitorType
class TakeSourceScreenshot(Baserequests):
"""
At least `embedPictureFormat` or `saveToFilePath` must be specified.
Clients can specify `width` and `height` parameters to receive scaled pictures. Aspect ratio is
preserved if only one of these two parameters is specified.
:Arguments:
*sourceName*
type: String (optional)
Source name. Note that, since scenes are also sources, you can also provide a scene name. If not provided, the currently active scene is used.
*embedPictureFormat*
type: String (optional)
Format of the Data URI encoded picture. Can be "png", "jpg", "jpeg" or "bmp" (or any other value supported by Qt's Image module)
*saveToFilePath*
type: String (optional)
Full file path (file extension included) where the captured image is to be saved. Can be in a format different from `pictureFormat`. Can be a relative path.
*fileFormat*
type: String (optional)
Format to save the image file as (one of the values provided in the `supported-image-export-formats` response field of `GetVersion`). If not specified, tries to guess based on file extension.
*compressionQuality*
type: int (optional)
Compression ratio between -1 and 100 to write the image with. -1 is automatic, 1 is smallest file/most compression, 100 is largest file/least compression. Varies with image type.
*width*
type: int (optional)
Screenshot width. Defaults to the source's base width.
*height*
type: int (optional)
Screenshot height. Defaults to the source's base height.
:Returns:
*sourceName*
type: String
Source name
*img*
type: String
Image Data URI (if `embedPictureFormat` was specified in the request)
*imageFile*
type: String
Absolute path to the saved image file (if `saveToFilePath` was specified in the request)
"""
def __init__(self, sourceName=None, embedPictureFormat=None, saveToFilePath=None, fileFormat=None, compressionQuality=None, width=None, height=None):
Baserequests.__init__(self)
self.name = 'TakeSourceScreenshot'
self.datain['sourceName'] = None
self.datain['img'] = None
self.datain['imageFile'] = None
self.dataout['sourceName'] = sourceName
self.dataout['embedPictureFormat'] = embedPictureFormat
self.dataout['saveToFilePath'] = saveToFilePath
self.dataout['fileFormat'] = fileFormat
self.dataout['compressionQuality'] = compressionQuality
self.dataout['width'] = width
self.dataout['height'] = height
def getSourceName(self):
return self.datain['sourceName']
def getImg(self):
return self.datain['img']
def getImageFile(self):
return self.datain['imageFile']
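# Usage sketch: grabbing a screenshot of a scene as an embedded PNG data URI
# and, alternatively, saving it straight to disk. `ws` is the assumed
# connected client; the scene name and file path are illustrative.
#
#     shot = TakeSourceScreenshot(sourceName="Scene 1",
#                                 embedPictureFormat="png",
#                                 width=1280, height=720)
#     ws.call(shot)
#     data_uri = shot.getImg()             # "data:image/png;base64,..."
#
#     ws.call(TakeSourceScreenshot(sourceName="Scene 1",
#                                  saveToFilePath="/tmp/scene1.png"))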
class ListOutputs(Baserequests):
"""List existing outputs
:Returns:
*outputs*
type: Array<Output>
Outputs list
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListOutputs'
self.datain['outputs'] = None
def getOutputs(self):
return self.datain['outputs']
class GetOutputInfo(Baserequests):
"""Get information about a single output
:Arguments:
*outputName*
type: String
Output name
:Returns:
*outputInfo*
type: Output
Output info
"""
def __init__(self, outputName):
Baserequests.__init__(self)
self.name = 'GetOutputInfo'
self.datain['outputInfo'] = None
self.dataout['outputName'] = outputName
def getOutputInfo(self):
return self.datain['outputInfo']
class StartOutput(Baserequests):
"""
Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which add outputs to OBS may not function properly when they are controlled in this way.
:Arguments:
*outputName*
type: String
Output name
"""
def __init__(self, outputName):
Baserequests.__init__(self)
self.name = 'StartOutput'
self.dataout['outputName'] = outputName
class StopOutput(Baserequests):
"""
Note: Controlling outputs is an experimental feature of obs-websocket. Some plugins which add outputs to OBS may not function properly when they are controlled in this way.
:Arguments:
*outputName*
type: String
Output name
*force*
type: boolean (optional)
Force stop (default: false)
"""
def __init__(self, outputName, force=None):
Baserequests.__init__(self)
self.name = 'StopOutput'
self.dataout['outputName'] = outputName
self.dataout['force'] = force
class SetCurrentProfile(Baserequests):
"""Set the currently active profile.
:Arguments:
*profile_name*
type: String
Name of the desired profile.
"""
def __init__(self, profile_name):
Baserequests.__init__(self)
self.name = 'SetCurrentProfile'
self.dataout['profile-name'] = profile_name
class GetCurrentProfile(Baserequests):
"""Get the name of the current profile.
:Returns:
*profile_name*
type: String
Name of the currently active profile.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentProfile'
self.datain['profile-name'] = None
def getProfileName(self):
return self.datain['profile-name']
class ListProfiles(Baserequests):
"""Get a list of available profiles.
:Returns:
*profiles*
type: Array<Object>
List of available profiles.
*profiles.*.profile_name*
type: String
Filter name
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListProfiles'
self.datain['profiles'] = None
def getProfiles(self):
return self.datain['profiles']
class GetRecordingStatus(Baserequests):
"""Get current recording status.
:Returns:
*isRecording*
type: boolean
Current recording status.
*isRecordingPaused*
type: boolean
Whether the recording is paused or not.
*recordTimecode*
type: String (optional)
Time elapsed since recording started (only present if currently recording).
*recordingFilename*
type: String (optional)
Absolute path to the recording file (only present if currently recording).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetRecordingStatus'
self.datain['isRecording'] = None
self.datain['isRecordingPaused'] = None
self.datain['recordTimecode'] = None
self.datain['recordingFilename'] = None
def getIsRecording(self):
return self.datain['isRecording']
def getIsRecordingPaused(self):
return self.datain['isRecordingPaused']
def getRecordTimecode(self):
return self.datain['recordTimecode']
def getRecordingFilename(self):
return self.datain['recordingFilename']
class StartStopRecording(Baserequests):
"""Toggle recording on or off (depending on the current recording state).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopRecording'
class StartRecording(Baserequests):
"""Start recording.
Will return an `error` if recording is already active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartRecording'
class StopRecording(Baserequests):
"""Stop recording.
Will return an `error` if recording is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopRecording'
class PauseRecording(Baserequests):
"""Pause the current recording.
Returns an error if recording is not active or already paused.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'PauseRecording'
class ResumeRecording(Baserequests):
"""Resume/unpause the current recording (if paused).
Returns an error if recording is not active or not paused.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ResumeRecording'
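# Usage sketch: a defensive recording toggle that checks the current state
# before acting, avoiding the error returned by StartRecording/StopRecording
# when the state does not match. `ws` is the assumed connected client.
#
#     status = GetRecordingStatus()
#     ws.call(status)
#     if status.getIsRecording():
#         ws.call(StopRecording())
#     else:
#         ws.call(StartRecording())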
class SetRecordingFolder(Baserequests):
"""
Please note: if `SetRecordingFolder` is called while a recording is
in progress, the change won't be applied immediately and will be
effective on the next recording.
:Arguments:
*rec_folder*
type: String
Path of the recording folder.
"""
def __init__(self, rec_folder):
Baserequests.__init__(self)
self.name = 'SetRecordingFolder'
self.dataout['rec-folder'] = rec_folder
class GetRecordingFolder(Baserequests):
"""Get the path of the current recording folder.
:Returns:
*rec_folder*
type: String
Path of the recording folder.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetRecordingFolder'
self.datain['rec-folder'] = None
def getRecFolder(self):
return self.datain['rec-folder']
class GetReplayBufferStatus(Baserequests):
"""Get the status of the OBS replay buffer.
:Returns:
*isReplayBufferActive*
type: boolean
Current recording status.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetReplayBufferStatus'
self.datain['isReplayBufferActive'] = None
def getIsReplayBufferActive(self):
return self.datain['isReplayBufferActive']
class StartStopReplayBuffer(Baserequests):
"""Toggle the Replay Buffer on/off (depending on the current state of the replay buffer).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopReplayBuffer'
class StartReplayBuffer(Baserequests):
"""Start recording into the Replay Buffer.
Will return an `error` if the Replay Buffer is already active or if the
"Save Replay Buffer" hotkey is not set in OBS' settings.
Setting this hotkey is mandatory, even when triggering saves only
through obs-websocket.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartReplayBuffer'
class StopReplayBuffer(Baserequests):
"""Stop recording into the Replay Buffer.
Will return an `error` if the Replay Buffer is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopReplayBuffer'
class SaveReplayBuffer(Baserequests):
"""Flush and save the contents of the Replay Buffer to disk. This is
basically the same as triggering the "Save Replay Buffer" hotkey.
Will return an `error` if the Replay Buffer is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'SaveReplayBuffer'
class SetCurrentSceneCollection(Baserequests):
"""Change the active scene collection.
:Arguments:
*sc_name*
type: String
Name of the desired scene collection.
"""
def __init__(self, sc_name):
Baserequests.__init__(self)
self.name = 'SetCurrentSceneCollection'
self.dataout['sc-name'] = sc_name
class GetCurrentSceneCollection(Baserequests):
"""Get the name of the current scene collection.
:Returns:
*sc_name*
type: String
Name of the currently active scene collection.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentSceneCollection'
self.datain['sc-name'] = None
def getScName(self):
return self.datain['sc-name']
class ListSceneCollections(Baserequests):
"""List available scene collections
:Returns:
*scene_collections*
type: Array<String>
Scene collections list
*scene_collections.*.sc_name*
type: String
Scene collection name
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ListSceneCollections'
self.datain['scene-collections'] = None
def getSceneCollections(self):
return self.datain['scene-collections']
class GetSceneItemList(Baserequests):
"""Get a list of all scene items in a scene.
:Arguments:
*sceneName*
type: String (optional)
Name of the scene to get the list of scene items from. Defaults to the current scene if not specified.
:Returns:
*sceneName*
type: String
Name of the requested (or current) scene
*sceneItems*
type: Array<Object>
Array of scene items
*sceneItems.*.itemId*
type: int
Unique item id of the source item
*sceneItems.*.sourceKind*
type: String
            ID of the scene item's source. For example `vlc_source` or `image_source`
*sceneItems.*.sourceName*
type: String
Name of the scene item's source
*sceneItems.*.sourceType*
type: String
Type of the scene item's source. Either `input`, `group`, or `scene`
"""
def __init__(self, sceneName=None):
Baserequests.__init__(self)
self.name = 'GetSceneItemList'
self.datain['sceneName'] = None
self.datain['sceneItems'] = None
self.dataout['sceneName'] = sceneName
def getSceneName(self):
return self.datain['sceneName']
def getSceneItems(self):
return self.datain['sceneItems']
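# Usage sketch: enumerating the items of a scene and picking out their ids,
# which are the preferred way to address items in later requests. `ws` is
# the assumed connected client and the scene name is illustrative.
#
#     items = GetSceneItemList("Scene 1")
#     ws.call(items)
#     for entry in items.getSceneItems() or []:
#         print(entry["itemId"], entry["sourceName"], entry["sourceKind"])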
class GetSceneItemProperties(Baserequests):
"""Gets the scene specific properties of the specified source item.
Coordinates are relative to the item's parent (the scene or group it belongs to).
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
:Returns:
*name*
type: String
Scene Item name.
*itemId*
type: int
Scene Item ID.
*position.x*
type: double
The x position of the source from the left.
*position.y*
type: double
The y position of the source from the top.
*position.alignment*
type: int
The point on the source that the item is manipulated from. The sum of 1=Left or 2=Right, and 4=Top or 8=Bottom, or omit to center on that axis.
*rotation*
type: double
The clockwise rotation of the item in degrees around the point of alignment.
*scale.x*
type: double
The x-scale factor of the source.
*scale.y*
type: double
The y-scale factor of the source.
*crop.top*
type: int
The number of pixels cropped off the top of the source before scaling.
*crop.right*
type: int
The number of pixels cropped off the right of the source before scaling.
*crop.bottom*
type: int
The number of pixels cropped off the bottom of the source before scaling.
*crop.left*
type: int
The number of pixels cropped off the left of the source before scaling.
*visible*
type: bool
If the source is visible.
*muted*
type: bool
If the source is muted.
*locked*
type: bool
If the source's transform is locked.
*bounds.type*
type: String
Type of bounding box. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER", "OBS_BOUNDS_SCALE_OUTER", "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT", "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
*bounds.alignment*
type: int
Alignment of the bounding box.
*bounds.x*
type: double
Width of the bounding box.
*bounds.y*
type: double
Height of the bounding box.
*sourceWidth*
type: int
Base width (without scaling) of the source
*sourceHeight*
type: int
            Base height (without scaling) of the source
*width*
type: double
Scene item width (base source width multiplied by the horizontal scaling factor)
*height*
type: double
Scene item height (base source height multiplied by the vertical scaling factor)
*parentGroupName*
type: String (optional)
Name of the item's parent (if this item belongs to a group)
*groupChildren*
type: Array<SceneItemTransform> (optional)
List of children (if this item is a group)
"""
def __init__(self, item, scene_name=None):
Baserequests.__init__(self)
self.name = 'GetSceneItemProperties'
self.datain['name'] = None
self.datain['itemId'] = None
self.datain['position'] = None
self.datain['rotation'] = None
self.datain['scale'] = None
self.datain['crop'] = None
self.datain['visible'] = None
self.datain['muted'] = None
self.datain['locked'] = None
self.datain['bounds'] = None
self.datain['sourceWidth'] = None
self.datain['sourceHeight'] = None
self.datain['width'] = None
self.datain['height'] = None
self.datain['parentGroupName'] = None
self.datain['groupChildren'] = None
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
def getName(self):
return self.datain['name']
def getItemId(self):
return self.datain['itemId']
def getPosition(self):
return self.datain['position']
def getRotation(self):
return self.datain['rotation']
def getScale(self):
return self.datain['scale']
def getCrop(self):
return self.datain['crop']
def getVisible(self):
return self.datain['visible']
def getMuted(self):
return self.datain['muted']
def getLocked(self):
return self.datain['locked']
def getBounds(self):
return self.datain['bounds']
def getSourceWidth(self):
return self.datain['sourceWidth']
def getSourceHeight(self):
return self.datain['sourceHeight']
def getWidth(self):
return self.datain['width']
def getHeight(self):
return self.datain['height']
def getParentGroupName(self):
return self.datain['parentGroupName']
def getGroupChildren(self):
return self.datain['groupChildren']
class SetSceneItemProperties(Baserequests):
"""Sets the scene specific properties of a source. Unspecified properties will remain unchanged.
Coordinates are relative to the item's parent (the scene or group it belongs to).
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the source item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
*position.x*
type: double (optional)
The new x position of the source.
*position.y*
type: double (optional)
The new y position of the source.
*position.alignment*
type: int (optional)
The new alignment of the source.
*rotation*
type: double (optional)
The new clockwise rotation of the item in degrees.
*scale.x*
type: double (optional)
The new x scale of the item.
*scale.y*
type: double (optional)
The new y scale of the item.
*crop.top*
type: int (optional)
The new amount of pixels cropped off the top of the source before scaling.
*crop.bottom*
type: int (optional)
The new amount of pixels cropped off the bottom of the source before scaling.
*crop.left*
type: int (optional)
The new amount of pixels cropped off the left of the source before scaling.
*crop.right*
type: int (optional)
The new amount of pixels cropped off the right of the source before scaling.
*visible*
type: bool (optional)
The new visibility of the source. 'true' shows source, 'false' hides source.
*locked*
type: bool (optional)
The new locked status of the source. 'true' keeps it in its current position, 'false' allows movement.
*bounds.type*
type: String (optional)
The new bounds type of the source. Can be "OBS_BOUNDS_STRETCH", "OBS_BOUNDS_SCALE_INNER", "OBS_BOUNDS_SCALE_OUTER", "OBS_BOUNDS_SCALE_TO_WIDTH", "OBS_BOUNDS_SCALE_TO_HEIGHT", "OBS_BOUNDS_MAX_ONLY" or "OBS_BOUNDS_NONE".
*bounds.alignment*
type: int (optional)
The new alignment of the bounding box. (0-2, 4-6, 8-10)
*bounds.x*
type: double (optional)
The new width of the bounding box.
*bounds.y*
type: double (optional)
The new height of the bounding box.
"""
def __init__(self, item, scene_name=None, position=None, rotation=None, scale=None, crop=None, visible=None, locked=None, bounds=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemProperties'
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
self.dataout['position'] = position
self.dataout['rotation'] = rotation
self.dataout['scale'] = scale
self.dataout['crop'] = crop
self.dataout['visible'] = visible
self.dataout['locked'] = locked
self.dataout['bounds'] = bounds
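# Usage sketch: moving and scaling a scene item. The `item` argument may be
# a plain name or a dict with `name`/`id`; nested fields such as position
# and scale are passed as dicts using the keys documented above. `ws` is the
# assumed connected client and the names are illustrative.
#
#     ws.call(SetSceneItemProperties(
#         {"name": "Webcam"},
#         scene_name="Scene 1",
#         position={"x": 50, "y": 50},
#         scale={"x": 0.5, "y": 0.5},
#         visible=True,
#     ))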
class ResetSceneItem(Baserequests):
"""Reset a scene item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String | Object
Scene Item name (if this field is a string) or specification (if it is an object).
*item.name*
type: String (optional)
Scene Item name (if the `item` field is an object)
*item.id*
type: int (optional)
Scene Item ID (if the `item` field is an object)
"""
def __init__(self, item, scene_name=None):
Baserequests.__init__(self)
self.name = 'ResetSceneItem'
self.dataout['item'] = item
self.dataout['scene-name'] = scene_name
class SetSceneItemRender(Baserequests):
"""Show or hide a specified source item in a specified scene.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the currently active scene.
*source*
type: String
Scene Item name.
*render*
type: boolean
true = shown ; false = hidden
"""
def __init__(self, source, render, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemRender'
self.dataout['source'] = source
self.dataout['render'] = render
self.dataout['scene-name'] = scene_name
class SetSceneItemPosition(Baserequests):
"""Sets the coordinates of a specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*x*
type: double
X coordinate.
*y*
type: double
Y coordinate.
"""
def __init__(self, item, x, y, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemPosition'
self.dataout['item'] = item
self.dataout['x'] = x
self.dataout['y'] = y
self.dataout['scene-name'] = scene_name
class SetSceneItemTransform(Baserequests):
"""Set the transform of the specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*x_scale*
type: double
Width scale factor.
*y_scale*
type: double
Height scale factor.
*rotation*
type: double
Source item rotation (in degrees).
"""
def __init__(self, item, x_scale, y_scale, rotation, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemTransform'
self.dataout['item'] = item
self.dataout['x-scale'] = x_scale
self.dataout['y-scale'] = y_scale
self.dataout['rotation'] = rotation
self.dataout['scene-name'] = scene_name
class SetSceneItemCrop(Baserequests):
"""Sets the crop coordinates of the specified source item.
:Arguments:
*scene_name*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: String
Scene Item name.
*top*
type: int
Pixel position of the top of the source item.
*bottom*
type: int
Pixel position of the bottom of the source item.
*left*
type: int
Pixel position of the left of the source item.
*right*
type: int
Pixel position of the right of the source item.
"""
def __init__(self, item, top, bottom, left, right, scene_name=None):
Baserequests.__init__(self)
self.name = 'SetSceneItemCrop'
self.dataout['item'] = item
self.dataout['top'] = top
self.dataout['bottom'] = bottom
self.dataout['left'] = left
self.dataout['right'] = right
self.dataout['scene-name'] = scene_name
class DeleteSceneItem(Baserequests):
"""Deletes a scene item.
:Arguments:
*scene*
type: String (optional)
Name of the scene the scene item belongs to. Defaults to the current scene.
*item*
type: Object
Scene item to delete (required)
*item.name*
type: String
Scene Item name (prefer `id`, including both is acceptable).
*item.id*
type: int
Scene Item ID.
"""
def __init__(self, item, scene=None):
Baserequests.__init__(self)
self.name = 'DeleteSceneItem'
self.dataout['item'] = item
self.dataout['scene'] = scene
class AddSceneItem(Baserequests):
"""Creates a scene item in a scene. In other words, this is how you add a source into a scene.
:Arguments:
*sceneName*
type: String
Name of the scene to create the scene item in
*sourceName*
type: String
Name of the source to be added
*setVisible*
type: boolean
            Whether to make the scene item visible on creation or not. Default `true`
:Returns:
*itemId*
type: int
Numerical ID of the created scene item
"""
def __init__(self, sceneName, sourceName, setVisible):
Baserequests.__init__(self)
self.name = 'AddSceneItem'
self.datain['itemId'] = None
self.dataout['sceneName'] = sceneName
self.dataout['sourceName'] = sourceName
self.dataout['setVisible'] = setVisible
def getItemId(self):
return self.datain['itemId']
class DuplicateSceneItem(Baserequests):
"""Duplicates a scene item.
:Arguments:
*fromScene*
type: String (optional)
Name of the scene to copy the item from. Defaults to the current scene.
*toScene*
type: String (optional)
Name of the scene to create the item in. Defaults to the current scene.
*item*
type: Object
Scene Item to duplicate from the source scene (required)
*item.name*
type: String
Scene Item name (prefer `id`, including both is acceptable).
*item.id*
type: int
Scene Item ID.
:Returns:
*scene*
type: String
Name of the scene where the new item was created
*item*
type: Object
New item info
*item.id*
type: int
New item ID
*item.name*
type: String
New item name
"""
def __init__(self, item, fromScene=None, toScene=None):
Baserequests.__init__(self)
self.name = 'DuplicateSceneItem'
self.datain['scene'] = None
self.datain['item'] = None
self.dataout['item'] = item
self.dataout['fromScene'] = fromScene
self.dataout['toScene'] = toScene
def getScene(self):
return self.datain['scene']
def getItem(self):
return self.datain['item']
class SetCurrentScene(Baserequests):
"""Switch to the specified scene.
:Arguments:
*scene_name*
type: String
Name of the scene to switch to.
"""
def __init__(self, scene_name):
Baserequests.__init__(self)
self.name = 'SetCurrentScene'
self.dataout['scene-name'] = scene_name
class GetCurrentScene(Baserequests):
"""Get the current scene's name and source items.
:Returns:
*name*
type: String
Name of the currently active scene.
*sources*
type: Array<SceneItem>
Ordered list of the current scene's source items.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentScene'
self.datain['name'] = None
self.datain['sources'] = None
def getName(self):
return self.datain['name']
def getSources(self):
return self.datain['sources']
class GetSceneList(Baserequests):
"""Get a list of scenes in the currently active profile.
:Returns:
*current_scene*
type: String
Name of the currently active scene.
*scenes*
type: Array<Scene>
Ordered list of the current profile's scenes (See [GetCurrentScene](#getcurrentscene) for more information).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetSceneList'
self.datain['current-scene'] = None
self.datain['scenes'] = None
def getCurrentScene(self):
return self.datain['current-scene']
def getScenes(self):
return self.datain['scenes']
class CreateScene(Baserequests):
"""Create a new scene scene.
:Arguments:
*sceneName*
type: String
Name of the scene to create.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'CreateScene'
self.dataout['sceneName'] = sceneName
class ReorderSceneItems(Baserequests):
"""Changes the order of scene items in the requested scene.
:Arguments:
*scene*
type: String (optional)
Name of the scene to reorder (defaults to current).
*items*
type: Array<Scene>
Ordered list of objects with name and/or id specified. Id preferred due to uniqueness per scene
*items.*.id*
type: int (optional)
Id of a specific scene item. Unique on a scene by scene basis.
*items.*.name*
type: String (optional)
Name of a scene item. Sufficiently unique if no scene items share sources within the scene.
"""
def __init__(self, items, scene=None):
Baserequests.__init__(self)
self.name = 'ReorderSceneItems'
self.dataout['items'] = items
self.dataout['scene'] = scene
class SetSceneTransitionOverride(Baserequests):
"""Set a scene to use a specific transition override.
:Arguments:
*sceneName*
type: String
Name of the scene to switch to.
*transitionName*
type: String
Name of the transition to use.
*transitionDuration*
            type: int (optional)
Duration in milliseconds of the transition if transition is not fixed. Defaults to the current duration specified in the UI if there is no current override and this value is not given.
"""
def __init__(self, sceneName, transitionName, transitionDuration):
Baserequests.__init__(self)
self.name = 'SetSceneTransitionOverride'
self.dataout['sceneName'] = sceneName
self.dataout['transitionName'] = transitionName
self.dataout['transitionDuration'] = transitionDuration
class RemoveSceneTransitionOverride(Baserequests):
"""Remove any transition override on a scene.
:Arguments:
*sceneName*
type: String
Name of the scene to switch to.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'RemoveSceneTransitionOverride'
self.dataout['sceneName'] = sceneName
class GetSceneTransitionOverride(Baserequests):
"""Get the current scene transition override.
:Arguments:
*sceneName*
type: String
Name of the scene to switch to.
:Returns:
*transitionName*
type: String
Name of the current overriding transition. Empty string if no override is set.
*transitionDuration*
type: int
Transition duration. `-1` if no override is set.
"""
def __init__(self, sceneName):
Baserequests.__init__(self)
self.name = 'GetSceneTransitionOverride'
self.datain['transitionName'] = None
self.datain['transitionDuration'] = None
self.dataout['sceneName'] = sceneName
def getTransitionName(self):
return self.datain['transitionName']
def getTransitionDuration(self):
return self.datain['transitionDuration']
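# Usage sketch: pinning a scene to a specific transition override and then
# reading the override back. `ws` is the assumed connected client; the
# transition name must match one configured in OBS ("Fade" is illustrative).
#
#     ws.call(SetSceneTransitionOverride("Scene 1", "Fade", 300))
#     check = GetSceneTransitionOverride("Scene 1")
#     ws.call(check)
#     print(check.getTransitionName(), check.getTransitionDuration())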
class GetStreamingStatus(Baserequests):
"""Get current streaming and recording status.
:Returns:
*streaming*
type: boolean
Current streaming status.
*recording*
type: boolean
Current recording status.
*stream_timecode*
type: String (optional)
Time elapsed since streaming started (only present if currently streaming).
*rec_timecode*
type: String (optional)
Time elapsed since recording started (only present if currently recording).
*preview_only*
type: boolean
Always false. Retrocompatibility with OBSRemote.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStreamingStatus'
self.datain['streaming'] = None
self.datain['recording'] = None
self.datain['stream-timecode'] = None
self.datain['rec-timecode'] = None
self.datain['preview-only'] = None
def getStreaming(self):
return self.datain['streaming']
def getRecording(self):
return self.datain['recording']
def getStreamTimecode(self):
return self.datain['stream-timecode']
def getRecTimecode(self):
return self.datain['rec-timecode']
def getPreviewOnly(self):
return self.datain['preview-only']
class StartStopStreaming(Baserequests):
"""Toggle streaming on or off (depending on the current stream state).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StartStopStreaming'
class StartStreaming(Baserequests):
"""Start streaming.
Will return an `error` if streaming is already active.
:Arguments:
*stream*
type: Object (optional)
Special stream configuration. Please note: these won't be saved to OBS' configuration.
*stream.type*
type: String (optional)
If specified ensures the type of stream matches the given type (usually 'rtmp_custom' or 'rtmp_common'). If the currently configured stream type does not match the given stream type, all settings must be specified in the `settings` object or an error will occur when starting the stream.
*stream.metadata*
type: Object (optional)
Adds the given object parameters as encoded query string parameters to the 'key' of the RTMP stream. Used to pass data to the RTMP service about the streaming. May be any String, Numeric, or Boolean field.
*stream.settings*
type: Object (optional)
Settings for the stream.
*stream.settings.server*
type: String (optional)
The publish URL.
*stream.settings.key*
type: String (optional)
The publish key of the stream.
*stream.settings.use_auth*
type: boolean (optional)
Indicates whether authentication should be used when connecting to the streaming server.
*stream.settings.username*
type: String (optional)
If authentication is enabled, the username for the streaming server. Ignored if `use_auth` is not set to `true`.
*stream.settings.password*
type: String (optional)
If authentication is enabled, the password for the streaming server. Ignored if `use_auth` is not set to `true`.
"""
def __init__(self, stream=None):
Baserequests.__init__(self)
self.name = 'StartStreaming'
self.dataout['stream'] = stream
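# Usage sketch: starting a stream with a one-off custom RTMP configuration
# (not persisted to OBS' settings). `ws` is the assumed connected client;
# the server URL and stream key are placeholders.
#
#     ws.call(StartStreaming(stream={
#         "type": "rtmp_custom",
#         "settings": {
#             "server": "rtmp://live.example.com/app",
#             "key": "STREAM_KEY",
#         },
#     }))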
class StopStreaming(Baserequests):
"""Stop streaming.
Will return an `error` if streaming is not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'StopStreaming'
class SetStreamSettings(Baserequests):
"""Sets one or more attributes of the current streaming server settings. Any options not passed will remain unchanged. Returns the updated settings in response. If 'type' is different than the current streaming service type, all settings are required. Returns the full settings of the stream (the same as GetStreamSettings).
:Arguments:
*type*
type: String
The type of streaming service configuration, usually `rtmp_custom` or `rtmp_common`.
*settings*
type: Object
The actual settings of the stream.
*settings.server*
type: String (optional)
The publish URL.
*settings.key*
type: String (optional)
The publish key.
*settings.use_auth*
type: boolean (optional)
Indicates whether authentication should be used when connecting to the streaming server.
*settings.username*
type: String (optional)
The username for the streaming service.
*settings.password*
type: String (optional)
The password for the streaming service.
*save*
type: boolean
Persist the settings to disk.
"""
def __init__(self, type, settings, save):
Baserequests.__init__(self)
self.name = 'SetStreamSettings'
self.dataout['type'] = type
self.dataout['settings'] = settings
self.dataout['save'] = save
class GetStreamSettings(Baserequests):
"""Get the current streaming server settings.
:Returns:
*type*
type: String
The type of streaming service configuration. Possible values: 'rtmp_custom' or 'rtmp_common'.
*settings*
type: Object
Stream settings object.
*settings.server*
type: String
The publish URL.
*settings.key*
type: String
The publish key of the stream.
*settings.use_auth*
type: boolean
Indicates whether authentication should be used when connecting to the streaming server.
*settings.username*
type: String
The username to use when accessing the streaming server. Only present if `use_auth` is `true`.
*settings.password*
type: String
The password to use when accessing the streaming server. Only present if `use_auth` is `true`.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStreamSettings'
self.datain['type'] = None
self.datain['settings'] = None
def getType(self):
return self.datain['type']
def getSettings(self):
return self.datain['settings']
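# Illustrative sketch (not part of the generated request classes): how the
# datain accessors above behave once a reply has been merged into the request.
# The reply dict is hand-written for demonstration; in real use it would come
# from the obs-websocket server.
def _example_read_stream_settings_reply():  # pragma: no cover - documentation only
    request = GetStreamSettings()
    request.datain.update({
        'type': 'rtmp_common',
        'settings': {'server': 'rtmp://example.invalid/live', 'key': '***'},
    })
    assert request.getType() == 'rtmp_common'
    return request.getSettings()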
class SaveStreamSettings(Baserequests):
"""Save the current streaming server settings to disk.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'SaveStreamSettings'
class SendCaptions(Baserequests):
"""Send the provided text as embedded CEA-608 caption data.
:Arguments:
*text*
type: String
Captions text
"""
def __init__(self, text):
Baserequests.__init__(self)
self.name = 'SendCaptions'
self.dataout['text'] = text
class GetStudioModeStatus(Baserequests):
"""Indicates if Studio Mode is currently enabled.
:Returns:
*studio_mode*
type: boolean
Indicates if Studio Mode is enabled.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetStudioModeStatus'
self.datain['studio-mode'] = None
def getStudioMode(self):
return self.datain['studio-mode']
class GetPreviewScene(Baserequests):
"""Get the name of the currently previewed scene and its list of sources.
Will return an `error` if Studio Mode is not enabled.
:Returns:
*name*
type: String
The name of the active preview scene.
*sources*
type: Array<SceneItem>
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetPreviewScene'
self.datain['name'] = None
self.datain['sources'] = None
def getName(self):
return self.datain['name']
def getSources(self):
return self.datain['sources']
class SetPreviewScene(Baserequests):
"""Set the active preview scene.
Will return an `error` if Studio Mode is not enabled.
:Arguments:
*scene_name*
type: String
The name of the scene to preview.
"""
def __init__(self, scene_name):
Baserequests.__init__(self)
self.name = 'SetPreviewScene'
self.dataout['scene-name'] = scene_name
class TransitionToProgram(Baserequests):
"""Transitions the currently previewed scene to the main output.
Will return an `error` if Studio Mode is not enabled.
:Arguments:
*with_transition*
type: Object (optional)
Change the active transition before switching scenes. Defaults to the active transition.
*with_transition.name*
type: String
Name of the transition.
*with_transition.duration*
type: int (optional)
Transition duration (in milliseconds).
"""
def __init__(self, with_transition=None):
Baserequests.__init__(self)
self.name = 'TransitionToProgram'
self.dataout['with-transition'] = with_transition
class EnableStudioMode(Baserequests):
"""Enables Studio Mode.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'EnableStudioMode'
class DisableStudioMode(Baserequests):
"""Disables Studio Mode.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'DisableStudioMode'
class ToggleStudioMode(Baserequests):
"""Toggles Studio Mode (depending on the current state of studio mode).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ToggleStudioMode'
class GetTransitionList(Baserequests):
"""List of all transitions available in the frontend's dropdown menu.
:Returns:
*current_transition*
type: String
Name of the currently active transition.
*transitions*
type: Array<Object>
List of transitions.
*transitions.*.name*
type: String
Name of the transition.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionList'
self.datain['current-transition'] = None
self.datain['transitions'] = None
def getCurrentTransition(self):
return self.datain['current-transition']
def getTransitions(self):
return self.datain['transitions']
class GetCurrentTransition(Baserequests):
"""Get the name of the currently selected transition in the frontend's dropdown menu.
:Returns:
*name*
type: String
Name of the selected transition.
*duration*
type: int (optional)
Transition duration (in milliseconds) if supported by the transition.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetCurrentTransition'
self.datain['name'] = None
self.datain['duration'] = None
def getName(self):
return self.datain['name']
def getDuration(self):
return self.datain['duration']
class SetCurrentTransition(Baserequests):
"""Set the active transition.
:Arguments:
*transition_name*
type: String
The name of the transition.
"""
def __init__(self, transition_name):
Baserequests.__init__(self)
self.name = 'SetCurrentTransition'
self.dataout['transition-name'] = transition_name
class SetTransitionDuration(Baserequests):
"""Set the duration of the currently selected transition if supported.
:Arguments:
*duration*
type: int
Desired duration of the transition (in milliseconds).
"""
def __init__(self, duration):
Baserequests.__init__(self)
self.name = 'SetTransitionDuration'
self.dataout['duration'] = duration
class GetTransitionDuration(Baserequests):
"""Get the duration of the currently selected transition if supported.
:Returns:
*transition_duration*
type: int
Duration of the current transition (in milliseconds).
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionDuration'
self.datain['transition-duration'] = None
def getTransitionDuration(self):
return self.datain['transition-duration']
class GetTransitionPosition(Baserequests):
"""Get the position of the current transition.
:Returns:
*position*
type: double
current transition position. This value will be between 0.0 and 1.0. Note: Transition returns 1.0 when not active.
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'GetTransitionPosition'
self.datain['position'] = None
def getPosition(self):
return self.datain['position']
class GetTransitionSettings(Baserequests):
"""Get the current settings of a transition
:Arguments:
*transitionName*
type: String
Transition name
:Returns:
*transitionSettings*
type: Object
Current transition settings
"""
def __init__(self, transitionName):
Baserequests.__init__(self)
self.name = 'GetTransitionSettings'
self.datain['transitionSettings'] = None
self.dataout['transitionName'] = transitionName
def getTransitionSettings(self):
return self.datain['transitionSettings']
class SetTransitionSettings(Baserequests):
"""Change the current settings of a transition
:Arguments:
*transitionName*
type: String
Transition name
*transitionSettings*
type: Object
Transition settings (they can be partial)
:Returns:
*transitionSettings*
type: Object
Updated transition settings
"""
def __init__(self, transitionName, transitionSettings):
Baserequests.__init__(self)
self.name = 'SetTransitionSettings'
self.datain['transitionSettings'] = None
self.dataout['transitionName'] = transitionName
self.dataout['transitionSettings'] = transitionSettings
def getTransitionSettings(self):
return self.datain['transitionSettings']
class ReleaseTBar(Baserequests):
"""Release the T-Bar (like a user releasing their mouse button after moving it).
*YOU MUST CALL THIS if you called `SetTBarPosition` with the `release` parameter set to `false`.*
"""
def __init__(self):
Baserequests.__init__(self)
self.name = 'ReleaseTBar'
class SetTBarPosition(Baserequests):
"""
If your code needs to perform multiple successive T-Bar moves (e.g. : in an animation, or in response to a user moving a T-Bar control in your User Interface), set `release` to false and call `ReleaseTBar` later once the animation/interaction is over.
:Arguments:
*position*
type: double
T-Bar position. This value must be between 0.0 and 1.0.
*release*
type: boolean (optional)
Whether or not the T-Bar gets released automatically after setting its new position (like a user releasing their mouse button after moving the T-Bar). Call `ReleaseTBar` manually if you set `release` to false. Defaults to true.
"""
def __init__(self, position, release=None):
Baserequests.__init__(self)
self.name = 'SetTBarPosition'
self.dataout['position'] = position
self.dataout['release'] = release
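# Illustrative sketch (not part of the generated request classes): the T-Bar
# flow described above -- several SetTBarPosition calls with release=False,
# followed by a single mandatory ReleaseTBar. Sending the requests over a
# websocket client is assumed and not shown.
def _example_tbar_animation_requests():  # pragma: no cover - documentation only
    steps = [SetTBarPosition(position=i / 10.0, release=False) for i in range(11)]
    steps.append(ReleaseTBar())  # required because release=False was used above
    return [(req.name, req.dataout) for req in steps]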
|
simple_history/tests/custom_user/admin.py | rdurica/django-simple-history | 911 | 3321 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import CustomUser
admin.site.register(CustomUser, UserAdmin)
|
omegaconf/_utils.py | sugatoray/omegaconf | 1,091 | 3330 | <reponame>sugatoray/omegaconf<gh_stars>1000+
import copy
import os
import re
import string
import sys
import warnings
from contextlib import contextmanager
from enum import Enum
from textwrap import dedent
from typing import (
Any,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
Union,
get_type_hints,
)
import yaml
from .errors import (
ConfigIndexError,
ConfigTypeError,
ConfigValueError,
GrammarParseError,
OmegaConfBaseException,
ValidationError,
)
from .grammar_parser import SIMPLE_INTERPOLATION_PATTERN, parse
try:
import dataclasses
except ImportError: # pragma: no cover
dataclasses = None # type: ignore # pragma: no cover
try:
import attr
except ImportError: # pragma: no cover
attr = None # type: ignore # pragma: no cover
# Regexprs to match key paths like: a.b, a[b], ..a[c].d, etc.
# We begin by matching the head (in these examples: a, a, ..a).
# This can be read as "dots followed by any character but `.` or `[`"
# Note that a key starting with brackets, like [a], is purposely *not*
# matched here and will instead be handled in the next regex below (this
# is to keep this regex simple).
KEY_PATH_HEAD = re.compile(r"(\.)*[^.[]*")
# Then we match other keys. The following expression matches one key and can
# be read as a choice between two syntaxes:
# - `.` followed by anything except `.` or `[` (ex: .b, .d)
# - `[` followed by anything then `]` (ex: [b], [c])
KEY_PATH_OTHER = re.compile(r"\.([^.[]*)|\[(.*?)\]")
# source: https://yaml.org/type/bool.html
YAML_BOOL_TYPES = [
"y",
"Y",
"yes",
"Yes",
"YES",
"n",
"N",
"no",
"No",
"NO",
"true",
"True",
"TRUE",
"false",
"False",
"FALSE",
"on",
"On",
"ON",
"off",
"Off",
"OFF",
]
class Marker:
def __init__(self, desc: str):
self.desc = desc
def __repr__(self) -> str:
return self.desc
# To be used as default value when `None` is not an option.
_DEFAULT_MARKER_: Any = Marker("_DEFAULT_MARKER_")
class OmegaConfDumper(yaml.Dumper): # type: ignore
str_representer_added = False
@staticmethod
def str_representer(dumper: yaml.Dumper, data: str) -> yaml.ScalarNode:
with_quotes = yaml_is_bool(data) or is_int(data) or is_float(data)
return dumper.represent_scalar(
yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG,
data,
style=("'" if with_quotes else None),
)
def get_omega_conf_dumper() -> Type[OmegaConfDumper]:
if not OmegaConfDumper.str_representer_added:
OmegaConfDumper.add_representer(str, OmegaConfDumper.str_representer)
OmegaConfDumper.str_representer_added = True
return OmegaConfDumper
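# Illustrative sketch (not part of this module): the custom str representer
# above quotes strings that would otherwise be re-parsed by YAML as booleans
# or numbers, so "on" and "10" survive a round trip as strings.
def _example_dump_ambiguous_strings():  # pragma: no cover - documentation only
    return yaml.dump({"a": "on", "b": "10", "c": "plain"}, Dumper=get_omega_conf_dumper())
    # -> a: 'on'
    #    b: '10'
    #    c: plain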
def yaml_is_bool(b: str) -> bool:
return b in YAML_BOOL_TYPES
def get_yaml_loader() -> Any:
class OmegaConfLoader(yaml.SafeLoader): # type: ignore
def construct_mapping(self, node: yaml.Node, deep: bool = False) -> Any:
keys = set()
for key_node, value_node in node.value:
if key_node.tag != yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG:
continue
if key_node.value in keys:
raise yaml.constructor.ConstructorError(
"while constructing a mapping",
node.start_mark,
f"found duplicate key {key_node.value}",
key_node.start_mark,
)
keys.add(key_node.value)
return super().construct_mapping(node, deep=deep)
loader = OmegaConfLoader
loader.add_implicit_resolver(
"tag:yaml.org,2002:float",
re.compile(
"""^(?:
[-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)?
|[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+)
|\\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]*
|[-+]?\\.(?:inf|Inf|INF)
|\\.(?:nan|NaN|NAN))$""",
re.X,
),
list("-+0123456789."),
)
loader.yaml_implicit_resolvers = {
key: [
(tag, regexp)
for tag, regexp in resolvers
if tag != "tag:yaml.org,2002:timestamp"
]
for key, resolvers in loader.yaml_implicit_resolvers.items()
}
return loader
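# Illustrative sketch (not part of this module): the loader above rejects
# duplicate mapping keys and, with the timestamp resolver removed, keeps
# date-like scalars as plain strings.
def _example_yaml_loader_behaviour():  # pragma: no cover - documentation only
    loader = get_yaml_loader()
    assert yaml.load("date: 2021-01-01", Loader=loader) == {"date": "2021-01-01"}
    try:
        yaml.load("a: 1\na: 2", Loader=loader)
    except yaml.constructor.ConstructorError:
        return "duplicate key rejected"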
def _get_class(path: str) -> type:
from importlib import import_module
module_path, _, class_name = path.rpartition(".")
mod = import_module(module_path)
try:
klass: type = getattr(mod, class_name)
except AttributeError:
raise ImportError(f"Class {class_name} is not in module {module_path}")
return klass
def _is_union(type_: Any) -> bool:
return getattr(type_, "__origin__", None) is Union
def _resolve_optional(type_: Any) -> Tuple[bool, Any]:
"""Check whether `type_` is equivalent to `typing.Optional[T]` for some T."""
if getattr(type_, "__origin__", None) is Union:
args = type_.__args__
if len(args) == 2 and args[1] == type(None): # noqa E721
return True, args[0]
if type_ is Any:
return True, Any
return False, type_
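# Illustrative sketch (not part of this module): what _resolve_optional returns
# for a few common annotations.
def _example_resolve_optional():  # pragma: no cover - documentation only
    assert _resolve_optional(Optional[int]) == (True, int)
    assert _resolve_optional(int) == (False, int)
    assert _resolve_optional(Any) == (True, Any)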
def _is_optional(obj: Any, key: Optional[Union[int, str]] = None) -> bool:
"""Check `obj` metadata to see if the given node is optional."""
from .base import Container, Node
if key is not None:
assert isinstance(obj, Container)
obj = obj._get_node(key)
if isinstance(obj, Node):
return obj._is_optional()
else:
# In case `obj` is not a Node, treat it as optional by default.
# This is used in `ListConfig.append` and `ListConfig.insert`
# where the appended/inserted value might or might not be a Node.
return True
def _resolve_forward(type_: Type[Any], module: str) -> Type[Any]:
import typing # lgtm [py/import-and-import-from]
forward = typing.ForwardRef if hasattr(typing, "ForwardRef") else typing._ForwardRef # type: ignore
if type(type_) is forward:
return _get_class(f"{module}.{type_.__forward_arg__}")
else:
if is_dict_annotation(type_):
kt, vt = get_dict_key_value_types(type_)
if kt is not None:
kt = _resolve_forward(kt, module=module)
if vt is not None:
vt = _resolve_forward(vt, module=module)
return Dict[kt, vt] # type: ignore
if is_list_annotation(type_):
et = get_list_element_type(type_)
if et is not None:
et = _resolve_forward(et, module=module)
return List[et] # type: ignore
return type_
def extract_dict_subclass_data(obj: Any, parent: Any) -> Optional[Dict[str, Any]]:
"""Check if obj is an instance of a subclass of Dict. If so, extract the Dict keys/values."""
from omegaconf.omegaconf import _maybe_wrap
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
subclasses_dict = is_dict_subclass(obj_type)
if subclasses_dict:
warnings.warn(
f"Class `{obj_type.__name__}` subclasses `Dict`."
+ " Subclassing `Dict` in Structured Config classes is deprecated,"
+ " see github.com/omry/omegaconf/issues/663",
UserWarning,
stacklevel=9,
)
if is_type:
return None
elif subclasses_dict:
dict_subclass_data = {}
key_type, element_type = get_dict_key_value_types(obj_type)
for name, value in obj.items():
is_optional, type_ = _resolve_optional(element_type)
type_ = _resolve_forward(type_, obj.__module__)
try:
dict_subclass_data[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=parent,
)
except ValidationError as ex:
format_and_raise(
node=None, key=name, value=value, cause=ex, msg=str(ex)
)
return dict_subclass_data
else:
return None
def get_attr_class_field_names(obj: Any) -> List[str]:
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
return list(attr.fields_dict(obj_type))
def get_attr_data(obj: Any, allow_objects: Optional[bool] = None) -> Dict[str, Any]:
from omegaconf.omegaconf import OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
from omegaconf import MISSING
d = {}
is_type = isinstance(obj, type)
obj_type = obj if is_type else type(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
for name, attrib in attr.fields_dict(obj_type).items():
is_optional, type_ = _resolve_optional(attrib.type)
type_ = _resolve_forward(type_, obj.__module__)
if not is_type:
value = getattr(obj, name)
else:
value = attrib.default
if value == attr.NOTHING:
value = MISSING
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def get_dataclass_field_names(obj: Any) -> List[str]:
return [field.name for field in dataclasses.fields(obj)]
def get_dataclass_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
from omegaconf.omegaconf import MISSING, OmegaConf, _maybe_wrap
flags = {"allow_objects": allow_objects} if allow_objects is not None else {}
d = {}
obj_type = get_type_of(obj)
dummy_parent = OmegaConf.create({}, flags=flags)
dummy_parent._metadata.object_type = obj_type
resolved_hints = get_type_hints(obj_type)
for field in dataclasses.fields(obj):
name = field.name
is_optional, type_ = _resolve_optional(resolved_hints[field.name])
type_ = _resolve_forward(type_, obj.__module__)
if hasattr(obj, name):
value = getattr(obj, name)
if value == dataclasses.MISSING:
value = MISSING
else:
if field.default_factory == dataclasses.MISSING: # type: ignore
value = MISSING
else:
value = field.default_factory() # type: ignore
if _is_union(type_):
e = ConfigValueError(
f"Union types are not supported:\n{name}: {type_str(type_)}"
)
format_and_raise(node=None, key=None, value=value, cause=e, msg=str(e))
try:
d[name] = _maybe_wrap(
ref_type=type_,
is_optional=is_optional,
key=name,
value=value,
parent=dummy_parent,
)
except (ValidationError, GrammarParseError) as ex:
format_and_raise(
node=dummy_parent, key=name, value=value, cause=ex, msg=str(ex)
)
d[name]._set_parent(None)
dict_subclass_data = extract_dict_subclass_data(obj=obj, parent=dummy_parent)
if dict_subclass_data is not None:
d.update(dict_subclass_data)
return d
def is_dataclass(obj: Any) -> bool:
from omegaconf.base import Node
if dataclasses is None or isinstance(obj, Node):
return False
return dataclasses.is_dataclass(obj)
def is_attr_class(obj: Any) -> bool:
from omegaconf.base import Node
if attr is None or isinstance(obj, Node):
return False
return attr.has(obj)
def is_structured_config(obj: Any) -> bool:
return is_attr_class(obj) or is_dataclass(obj)
def is_dataclass_frozen(type_: Any) -> bool:
return type_.__dataclass_params__.frozen # type: ignore
def is_attr_frozen(type_: type) -> bool:
# This is very hacky and probably fragile as well.
# Unfortunately currently there isn't an official API in attr that can detect that.
# noinspection PyProtectedMember
return type_.__setattr__ == attr._make._frozen_setattrs # type: ignore
def get_type_of(class_or_object: Any) -> Type[Any]:
type_ = class_or_object
if not isinstance(type_, type):
type_ = type(class_or_object)
assert isinstance(type_, type)
return type_
def is_structured_config_frozen(obj: Any) -> bool:
type_ = get_type_of(obj)
if is_dataclass(type_):
return is_dataclass_frozen(type_)
if is_attr_class(type_):
return is_attr_frozen(type_)
return False
def get_structured_config_field_names(obj: Any) -> List[str]:
if is_dataclass(obj):
return get_dataclass_field_names(obj)
elif is_attr_class(obj):
return get_attr_class_field_names(obj)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
def get_structured_config_data(
obj: Any, allow_objects: Optional[bool] = None
) -> Dict[str, Any]:
if is_dataclass(obj):
return get_dataclass_data(obj, allow_objects=allow_objects)
elif is_attr_class(obj):
return get_attr_data(obj, allow_objects=allow_objects)
else:
raise ValueError(f"Unsupported type: {type(obj).__name__}")
class ValueKind(Enum):
VALUE = 0
MANDATORY_MISSING = 1
INTERPOLATION = 2
def _is_missing_value(value: Any) -> bool:
from omegaconf import Node
if isinstance(value, Node):
value = value._value()
return _is_missing_literal(value)
def _is_missing_literal(value: Any) -> bool:
# Uses literal '???' instead of the MISSING const for performance reasons.
return isinstance(value, str) and value == "???"
def _is_none(
value: Any, resolve: bool = False, throw_on_resolution_failure: bool = True
) -> bool:
from omegaconf import Node
if not isinstance(value, Node):
return value is None
if resolve:
value = value._maybe_dereference_node(
throw_on_resolution_failure=throw_on_resolution_failure
)
if not throw_on_resolution_failure and value is None:
# Resolution failure: consider that it is *not* None.
return False
assert isinstance(value, Node)
return value._is_none()
def get_value_kind(
value: Any, strict_interpolation_validation: bool = False
) -> ValueKind:
"""
Determine the kind of a value
Examples:
VALUE: "10", "20", True
MANDATORY_MISSING: "???"
INTERPOLATION: "${foo.bar}", "${foo.${bar}}", "${foo:bar}", "[${foo}, ${bar}]",
"ftp://${host}/path", "${foo:${bar}, [true], {'baz': ${baz}}}"
:param value: Input to classify.
:param strict_interpolation_validation: If `True`, then when `value` is a string
containing "${", it is parsed to validate the interpolation syntax. If `False`,
this parsing step is skipped: this is more efficient, but will not detect errors.
"""
if _is_missing_value(value):
return ValueKind.MANDATORY_MISSING
value = _get_value(value)
# We identify potential interpolations by the presence of "${" in the string.
# Note that escaped interpolations (ex: "esc: \${bar}") are identified as
# interpolations: this is intended, since they must be processed as interpolations
# for the string to be properly un-escaped.
# Keep in mind that invalid interpolations will only be detected when
# `strict_interpolation_validation` is True.
if isinstance(value, str) and "${" in value:
if strict_interpolation_validation:
# First try the cheap regex matching that detects common interpolations.
if SIMPLE_INTERPOLATION_PATTERN.match(value) is None:
# If no match, do the more expensive grammar parsing to detect errors.
parse(value)
return ValueKind.INTERPOLATION
else:
return ValueKind.VALUE
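# Illustrative sketch (not part of this module): the three kinds distinguished
# by get_value_kind, using the examples from its docstring.
def _example_value_kinds():  # pragma: no cover - documentation only
    assert get_value_kind("10") == ValueKind.VALUE
    assert get_value_kind("???") == ValueKind.MANDATORY_MISSING
    assert get_value_kind("${foo.bar}") == ValueKind.INTERPOLATION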
# DEPRECATED: remove in 2.2
def is_bool(st: str) -> bool:
st = str.lower(st)
return st == "true" or st == "false"
def is_float(st: str) -> bool:
try:
float(st)
return True
except ValueError:
return False
def is_int(st: str) -> bool:
try:
int(st)
return True
except ValueError:
return False
# DEPRECATED: remove in 2.2
def decode_primitive(s: str) -> Any:
if is_bool(s):
return str.lower(s) == "true"
if is_int(s):
return int(s)
if is_float(s):
return float(s)
return s
def is_primitive_list(obj: Any) -> bool:
from .base import Container
return not isinstance(obj, Container) and isinstance(obj, (list, tuple))
def is_primitive_dict(obj: Any) -> bool:
t = get_type_of(obj)
return t is dict
def is_dict_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Dict or type_ is Dict # pragma: no cover
else: # pragma: no cover
        # TypedDict is a bit hard to detect.
# this support is tentative, if it eventually causes issues in other areas it may be dropped.
typed_dict = hasattr(type_, "__base__") and type_.__base__ == dict
return origin is dict or typed_dict
def is_list_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is List or type_ is List # pragma: no cover
else:
return origin is list # pragma: no cover
def is_tuple_annotation(type_: Any) -> bool:
origin = getattr(type_, "__origin__", None)
if sys.version_info < (3, 7, 0):
return origin is Tuple or type_ is Tuple # pragma: no cover
else:
return origin is tuple # pragma: no cover
def is_dict_subclass(type_: Any) -> bool:
return type_ is not None and isinstance(type_, type) and issubclass(type_, Dict)
def is_dict(obj: Any) -> bool:
return is_primitive_dict(obj) or is_dict_annotation(obj) or is_dict_subclass(obj)
def is_primitive_container(obj: Any) -> bool:
return is_primitive_list(obj) or is_primitive_dict(obj)
def get_list_element_type(ref_type: Optional[Type[Any]]) -> Any:
args = getattr(ref_type, "__args__", None)
if ref_type is not List and args is not None and args[0]:
element_type = args[0]
else:
element_type = Any
return element_type
def get_dict_key_value_types(ref_type: Any) -> Tuple[Any, Any]:
args = getattr(ref_type, "__args__", None)
if args is None:
bases = getattr(ref_type, "__orig_bases__", None)
if bases is not None and len(bases) > 0:
args = getattr(bases[0], "__args__", None)
key_type: Any
element_type: Any
if ref_type is None or ref_type == Dict:
key_type = Any
element_type = Any
else:
if args is not None:
key_type = args[0]
element_type = args[1]
else:
key_type = Any
element_type = Any
return key_type, element_type
def valid_value_annotation_type(type_: Any) -> bool:
return type_ is Any or is_primitive_type(type_) or is_structured_config(type_)
def _valid_dict_key_annotation_type(type_: Any) -> bool:
from omegaconf import DictKeyType
return type_ is None or type_ is Any or issubclass(type_, DictKeyType.__args__) # type: ignore
def is_primitive_type(type_: Any) -> bool:
type_ = get_type_of(type_)
return issubclass(type_, Enum) or type_ in (int, float, bool, str, type(None))
def _is_interpolation(v: Any, strict_interpolation_validation: bool = False) -> bool:
if isinstance(v, str):
ret = (
get_value_kind(v, strict_interpolation_validation)
== ValueKind.INTERPOLATION
)
assert isinstance(ret, bool)
return ret
return False
def _get_value(value: Any) -> Any:
from .base import Container
from .nodes import ValueNode
if isinstance(value, ValueNode):
return value._value()
elif isinstance(value, Container):
boxed = value._value()
if boxed is None or _is_missing_literal(boxed) or _is_interpolation(boxed):
return boxed
# return primitives and regular OmegaConf Containers as is
return value
def get_ref_type(obj: Any, key: Any = None) -> Optional[Type[Any]]:
from omegaconf import Container, Node
if isinstance(obj, Container):
if key is not None:
obj = obj._get_node(key)
else:
if key is not None:
raise ValueError("Key must only be provided when obj is a container")
if isinstance(obj, Node):
ref_type = obj._metadata.ref_type
if obj._is_optional() and ref_type is not Any:
return Optional[ref_type] # type: ignore
else:
return ref_type
else:
return Any # type: ignore
def _raise(ex: Exception, cause: Exception) -> None:
# Set the environment variable OC_CAUSE=1 to get a stacktrace that includes the
# causing exception.
env_var = os.environ["OC_CAUSE"] if "OC_CAUSE" in os.environ else None
debugging = sys.gettrace() is not None
full_backtrace = (debugging and not env_var == "0") or (env_var == "1")
if full_backtrace:
ex.__cause__ = cause
else:
ex.__cause__ = None
    raise ex.with_traceback(sys.exc_info()[2])  # set env OC_CAUSE=1 for full backtrace
def format_and_raise(
node: Any,
key: Any,
value: Any,
msg: str,
cause: Exception,
type_override: Any = None,
) -> None:
from omegaconf import OmegaConf
from omegaconf.base import Node
if isinstance(cause, AssertionError):
raise
if isinstance(cause, OmegaConfBaseException) and cause._initialized:
ex = cause
if type_override is not None:
ex = type_override(str(cause))
ex.__dict__ = copy.deepcopy(cause.__dict__)
_raise(ex, cause)
object_type: Optional[Type[Any]]
object_type_str: Optional[str] = None
ref_type: Optional[Type[Any]]
ref_type_str: Optional[str]
child_node: Optional[Node] = None
if node is None:
full_key = key if key is not None else ""
object_type = None
ref_type = None
ref_type_str = None
else:
if key is not None and not node._is_none():
child_node = node._get_node(key, validate_access=False)
try:
full_key = node._get_full_key(key=key)
except Exception as exc:
# Since we are handling an exception, raising a different one here would
# be misleading. Instead, we display it in the key.
full_key = f"<unresolvable due to {type(exc).__name__}: {exc}>"
object_type = OmegaConf.get_type(node)
object_type_str = type_str(object_type)
ref_type = get_ref_type(node)
ref_type_str = type_str(ref_type)
msg = string.Template(msg).safe_substitute(
REF_TYPE=ref_type_str,
OBJECT_TYPE=object_type_str,
KEY=key,
FULL_KEY=full_key,
VALUE=value,
VALUE_TYPE=type_str(type(value), include_module_name=True),
KEY_TYPE=f"{type(key).__name__}",
)
if ref_type not in (None, Any):
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
reference_type=$REF_TYPE
object_type=$OBJECT_TYPE"""
)
else:
template = dedent(
"""\
$MSG
full_key: $FULL_KEY
object_type=$OBJECT_TYPE"""
)
s = string.Template(template=template)
message = s.substitute(
REF_TYPE=ref_type_str, OBJECT_TYPE=object_type_str, MSG=msg, FULL_KEY=full_key
)
exception_type = type(cause) if type_override is None else type_override
if exception_type == TypeError:
exception_type = ConfigTypeError
elif exception_type == IndexError:
exception_type = ConfigIndexError
ex = exception_type(f"{message}")
if issubclass(exception_type, OmegaConfBaseException):
ex._initialized = True
ex.msg = message
ex.parent_node = node
ex.child_node = child_node
ex.key = key
ex.full_key = full_key
ex.value = value
ex.object_type = object_type
ex.object_type_str = object_type_str
ex.ref_type = ref_type
ex.ref_type_str = ref_type_str
_raise(ex, cause)
def type_str(t: Any, include_module_name: bool = False) -> str:
is_optional, t = _resolve_optional(t)
if t is None:
return type(t).__name__
if t is Any:
return "Any"
if t is ...:
return "..."
if sys.version_info < (3, 7, 0): # pragma: no cover
# Python 3.6
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t.__origin__ is not None:
name = type_str(t.__origin__)
else:
name = str(t)
if name.startswith("typing."):
name = name[len("typing.") :]
else: # pragma: no cover
# Python >= 3.7
if hasattr(t, "__name__"):
name = str(t.__name__)
else:
if t._name is None:
if t.__origin__ is not None:
name = type_str(
t.__origin__, include_module_name=include_module_name
)
else:
name = str(t._name)
args = getattr(t, "__args__", None)
if args is not None:
args = ", ".join(
[type_str(t, include_module_name=include_module_name) for t in t.__args__]
)
ret = f"{name}[{args}]"
else:
ret = name
if include_module_name:
if (
hasattr(t, "__module__")
and t.__module__ != "builtins"
and t.__module__ != "typing"
and not t.__module__.startswith("omegaconf.")
):
module_prefix = t.__module__ + "."
else:
module_prefix = ""
ret = module_prefix + ret
if is_optional:
return f"Optional[{ret}]"
else:
return ret
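# Illustrative sketch (not part of this module): a few renderings produced by
# type_str; generic aliases and module-qualified names follow the same code path.
def _example_type_str():  # pragma: no cover - documentation only
    assert type_str(int) == "int"
    assert type_str(Optional[str]) == "Optional[str]"
    assert type_str(Any) == "Any"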
def _ensure_container(target: Any, flags: Optional[Dict[str, bool]] = None) -> Any:
from omegaconf import OmegaConf
if is_primitive_container(target):
assert isinstance(target, (list, dict))
target = OmegaConf.create(target, flags=flags)
elif is_structured_config(target):
target = OmegaConf.structured(target, flags=flags)
elif not OmegaConf.is_config(target):
raise ValueError(
"Invalid input. Supports one of "
+ "[dict,list,DictConfig,ListConfig,dataclass,dataclass instance,attr class,attr class instance]"
)
return target
def is_generic_list(type_: Any) -> bool:
"""
Checks if a type is a generic list, for example:
list returns False
typing.List returns False
typing.List[T] returns True
:param type_: variable type
:return: bool
"""
return is_list_annotation(type_) and get_list_element_type(type_) is not None
def is_generic_dict(type_: Any) -> bool:
"""
Checks if a type is a generic dict, for example:
    dict returns False
    typing.Dict returns False
    typing.Dict[KT, VT] returns True
:param type_: variable type
:return: bool
"""
return is_dict_annotation(type_) and len(get_dict_key_value_types(type_)) > 0
def is_container_annotation(type_: Any) -> bool:
return is_list_annotation(type_) or is_dict_annotation(type_)
def split_key(key: str) -> List[str]:
"""
Split a full key path into its individual components.
This is similar to `key.split(".")` but also works with the getitem syntax:
"a.b" -> ["a", "b"]
"a[b]" -> ["a, "b"]
".a.b[c].d" -> ["", "a", "b", "c", "d"]
"[a].b" -> ["a", "b"]
"""
# Obtain the first part of the key (in docstring examples: a, a, .a, '')
first = KEY_PATH_HEAD.match(key)
assert first is not None
first_stop = first.span()[1]
# `tokens` will contain all elements composing the key.
tokens = key[0:first_stop].split(".")
# Optimization in case `key` has no other component: we are done.
if first_stop == len(key):
return tokens
if key[first_stop] == "[" and not tokens[-1]:
# This is a special case where the first key starts with brackets, e.g.
# [a] or ..[a]. In that case there is an extra "" in `tokens` that we
# need to get rid of:
# [a] -> tokens = [""] but we would like []
# ..[a] -> tokens = ["", "", ""] but we would like ["", ""]
tokens.pop()
# Identify other key elements (in docstring examples: b, b, b/c/d, b)
others = KEY_PATH_OTHER.findall(key[first_stop:])
# There are two groups in the `KEY_PATH_OTHER` regex: one for keys starting
# with a dot (.b, .d) and one for keys starting with a bracket ([b], [c]).
# Only one group can be non-empty.
tokens += [dot_key if dot_key else bracket_key for dot_key, bracket_key in others]
return tokens
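# Illustrative sketch (not part of this module): split_key applied to the
# examples from its docstring.
def _example_split_key():  # pragma: no cover - documentation only
    assert split_key("a.b") == ["a", "b"]
    assert split_key("a[b]") == ["a", "b"]
    assert split_key(".a.b[c].d") == ["", "a", "b", "c", "d"]
    assert split_key("[a].b") == ["a", "b"]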
# Similar to Python 3.7+'s `contextlib.nullcontext` (which should be used instead,
# once support for Python 3.6 is dropped).
@contextmanager
def nullcontext(enter_result: Any = None) -> Iterator[Any]:
yield enter_result
|
site/tests/unittests/test/test_base64.py | martinphellwig/brython_wf | 652 | 3366 | <filename>site/tests/unittests/test/test_base64.py
import unittest
from test import support
import base64
import binascii
import os
import sys
import subprocess
class LegacyBase64TestCase(unittest.TestCase):
def test_encodebytes(self):
eq = self.assertEqual
eq(base64.encodebytes(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=\n")
eq(base64.encodebytes(b"a"), b"YQ==\n")
eq(base64.encodebytes(b"ab"), b"YWI=\n")
eq(base64.encodebytes(b"abc"), b"YWJj\n")
eq(base64.encodebytes(b""), b"")
eq(base64.encodebytes(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
# Non-bytes
eq(base64.encodebytes(bytearray(b'abc')), b'YWJj\n')
self.assertRaises(TypeError, base64.encodebytes, "")
def test_decodebytes(self):
eq = self.assertEqual
eq(base64.decodebytes(b"d3d3LnB5dGhvbi5vcmc=\n"), b"www.python.org")
eq(base64.decodebytes(b"YQ==\n"), b"a")
eq(base64.decodebytes(b"YWI=\n"), b"ab")
eq(base64.decodebytes(b"YWJj\n"), b"abc")
eq(base64.decodebytes(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodebytes(b''), b'')
# Non-bytes
eq(base64.decodebytes(bytearray(b'YWJj\n')), b'abc')
self.assertRaises(TypeError, base64.decodebytes, "")
def test_encode(self):
eq = self.assertEqual
from io import BytesIO, StringIO
infp = BytesIO(b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'0123456789!@#0^&*();:<>,. []{}')
outfp = BytesIO()
base64.encode(infp, outfp)
eq(outfp.getvalue(),
b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
b'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
b'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('abc'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'abc'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('abc'), StringIO())
def test_decode(self):
from io import BytesIO, StringIO
infp = BytesIO(b'd3d3LnB5dGhvbi5vcmc=')
outfp = BytesIO()
base64.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), b'www.python.org')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'YWJj\n'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), StringIO())
class BaseXYTestCase(unittest.TestCase):
def test_b64encode(self):
eq = self.assertEqual
# Test default alphabet
eq(base64.b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.b64encode(b'\x00'), b'AA==')
eq(base64.b64encode(b"a"), b"YQ==")
eq(base64.b64encode(b"ab"), b"YWI=")
eq(base64.b64encode(b"abc"), b"YWJj")
eq(base64.b64encode(b""), b"")
eq(base64.b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=b'*$'), b'01a*b$cd')
# Non-bytes
eq(base64.b64encode(bytearray(b'abcd')), b'YWJjZA==')
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=bytearray(b'*$')),
b'01a*b$cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.b64encode, "")
self.assertRaises(TypeError, base64.b64encode, b"", altchars="")
# Test standard alphabet
eq(base64.standard_b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode(b"a"), b"YQ==")
eq(base64.standard_b64encode(b"ab"), b"YWI=")
eq(base64.standard_b64encode(b"abc"), b"YWJj")
eq(base64.standard_b64encode(b""), b"")
eq(base64.standard_b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Non-bytes
eq(base64.standard_b64encode(bytearray(b'abcd')), b'YWJjZA==')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.standard_b64encode, "")
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode(b'\xd3V\xbeo\xf7\x1d'), b'01a-b_cd')
# Non-bytes
eq(base64.urlsafe_b64encode(bytearray(b'\xd3V\xbeo\xf7\x1d')), b'01a-b_cd')
# Check if passing a str object raises an error
self.assertRaises(TypeError, base64.urlsafe_b64encode, "")
def test_b64decode(self):
eq = self.assertEqual
tests = {b"d3d3LnB5dGhvbi5vcmc=": b"www.python.org",
b'AA==': b'\x00',
b"YQ==": b"a",
b"YWI=": b"ab",
b"YWJj": b"abc",
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==":
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}",
b'': b'',
}
for data, res in tests.items():
eq(base64.b64decode(data), res)
eq(base64.b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b64decode(bytearray(b"YWJj")), b"abc")
# Test with arbitrary alternative characters
tests_altchars = {(b'01a*b$cd', b'*$'): b'\xd3V\xbeo\xf7\x1d',
}
for (data, altchars), res in tests_altchars.items():
data_str = data.decode('ascii')
altchars_str = altchars.decode('ascii')
eq(base64.b64decode(data, altchars=altchars), res)
eq(base64.b64decode(data_str, altchars=altchars), res)
eq(base64.b64decode(data, altchars=altchars_str), res)
eq(base64.b64decode(data_str, altchars=altchars_str), res)
# Test standard alphabet
for data, res in tests.items():
eq(base64.standard_b64decode(data), res)
eq(base64.standard_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.standard_b64decode(bytearray(b"YWJj")), b"abc")
# Test with 'URL safe' alternative characters
tests_urlsafe = {b'01a-b_cd': b'\xd3V\xbeo\xf7\x1d',
b'': b'',
}
for data, res in tests_urlsafe.items():
eq(base64.urlsafe_b64decode(data), res)
eq(base64.urlsafe_b64decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.urlsafe_b64decode(bytearray(b'01a-b_cd')), b'\xd3V\xbeo\xf7\x1d')
def test_b64decode_padding_error(self):
self.assertRaises(binascii.Error, base64.b64decode, b'abc')
self.assertRaises(binascii.Error, base64.b64decode, 'abc')
def test_b64decode_invalid_chars(self):
# issue 1466065: Test some invalid characters.
tests = ((b'%3d==', b'\xdd'),
(b'$3d==', b'\xdd'),
(b'[==', b''),
(b'YW]3=', b'am'),
(b'3{d==', b'\xdd'),
(b'3d}==', b'\xdd'),
(b'@@', b''),
(b'!', b''),
(b'YWJj\nYWI=', b'abcab'))
for bstr, res in tests:
self.assertEqual(base64.b64decode(bstr), res)
self.assertEqual(base64.b64decode(bstr.decode('ascii')), res)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr, validate=True)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr.decode('ascii'), validate=True)
def test_b32encode(self):
eq = self.assertEqual
eq(base64.b32encode(b''), b'')
eq(base64.b32encode(b'\x00'), b'AA======')
eq(base64.b32encode(b'a'), b'ME======')
eq(base64.b32encode(b'ab'), b'MFRA====')
eq(base64.b32encode(b'abc'), b'MFRGG===')
eq(base64.b32encode(b'abcd'), b'MFRGGZA=')
eq(base64.b32encode(b'abcde'), b'MFRGGZDF')
# Non-bytes
eq(base64.b32encode(bytearray(b'abcd')), b'MFRGGZA=')
self.assertRaises(TypeError, base64.b32encode, "")
def test_b32decode(self):
eq = self.assertEqual
tests = {b'': b'',
b'AA======': b'\x00',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data), res)
eq(base64.b32decode(data.decode('ascii')), res)
# Non-bytes
eq(base64.b32decode(bytearray(b'MFRGG===')), b'abc')
def test_b32decode_casefold(self):
eq = self.assertEqual
tests = {b'': b'',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
# Lower cases
b'me======': b'a',
b'mfra====': b'ab',
b'mfrgg===': b'abc',
b'mfrggza=': b'abcd',
b'mfrggzdf': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data, True), res)
eq(base64.b32decode(data.decode('ascii'), True), res)
self.assertRaises(binascii.Error, base64.b32decode, b'me======')
self.assertRaises(binascii.Error, base64.b32decode, 'me======')
# Mapping zero and one
eq(base64.b32decode(b'MLO23456'), b'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('MLO23456'), b'b\xdd\xad\xf3\xbe')
map_tests = {(b'M1023456', b'L'): b'b\xdd\xad\xf3\xbe',
(b'M1023456', b'I'): b'b\x1d\xad\xf3\xbe',
}
for (data, map01), res in map_tests.items():
data_str = data.decode('ascii')
map01_str = map01.decode('ascii')
eq(base64.b32decode(data, map01=map01), res)
eq(base64.b32decode(data_str, map01=map01), res)
eq(base64.b32decode(data, map01=map01_str), res)
eq(base64.b32decode(data_str, map01=map01_str), res)
self.assertRaises(binascii.Error, base64.b32decode, data)
self.assertRaises(binascii.Error, base64.b32decode, data_str)
def test_b32decode_error(self):
for data in [b'abc', b'ABCDEF==', b'==ABCDEF']:
with self.assertRaises(binascii.Error):
base64.b32decode(data)
with self.assertRaises(binascii.Error):
base64.b32decode(data.decode('ascii'))
def test_b16encode(self):
eq = self.assertEqual
eq(base64.b16encode(b'\x01\x02\xab\xcd\xef'), b'0102ABCDEF')
eq(base64.b16encode(b'\x00'), b'00')
# Non-bytes
eq(base64.b16encode(bytearray(b'\x01\x02\xab\xcd\xef')), b'0102ABCDEF')
self.assertRaises(TypeError, base64.b16encode, "")
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode(b'0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(b'00'), b'\x00')
eq(base64.b16decode('00'), b'\x00')
# Lower case is not allowed without a flag
self.assertRaises(binascii.Error, base64.b16decode, b'0102abcdef')
self.assertRaises(binascii.Error, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode(b'0102abcdef', True), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102abcdef', True), b'\x01\x02\xab\xcd\xef')
# Non-bytes
eq(base64.b16decode(bytearray(b"0102ABCDEF")), b'\x01\x02\xab\xcd\xef')
def test_decode_nonascii_str(self):
decode_funcs = (base64.b64decode,
base64.standard_b64decode,
base64.urlsafe_b64decode,
base64.b32decode,
base64.b16decode)
for f in decode_funcs:
self.assertRaises(ValueError, f, 'with non-ascii \xcb')
def test_ErrorHeritage(self):
self.assertTrue(issubclass(binascii.Error, ValueError))
class TestMain(unittest.TestCase):
def tearDown(self):
if os.path.exists(support.TESTFN):
os.unlink(support.TESTFN)
def get_output(self, *args, **options):
args = (sys.executable, '-m', 'base64') + args
return subprocess.check_output(args, **options)
def test_encode_decode(self):
output = self.get_output('-t')
self.assertSequenceEqual(output.splitlines(), (
b"b'Aladdin:open sesame'",
br"b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n'",
b"b'Aladdin:open sesame'",
))
def test_encode_file(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'a\xffb\n')
output = self.get_output('-e', support.TESTFN)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
with open(support.TESTFN, 'rb') as fp:
output = self.get_output('-e', stdin=fp)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
def test_decode(self):
with open(support.TESTFN, 'wb') as fp:
fp.write(b'Yf9iCg==')
output = self.get_output('-d', support.TESTFN)
self.assertEqual(output.rstrip(), b'a\xffb')
def test_main():
support.run_unittest(__name__)
if __name__ == '__main__':
test_main()
|
tests/algorithms/memory/test_cmac.py | FrostByte266/neupy | 801 | 3395 | <gh_stars>100-1000
import numpy as np
from sklearn import metrics
from neupy import algorithms
from base import BaseTestCase
class CMACTestCase(BaseTestCase):
def test_cmac(self):
X_train = np.reshape(np.linspace(0, 2 * np.pi, 100), (100, 1))
X_train_before = X_train.copy()
X_test = np.reshape(np.linspace(np.pi, 2 * np.pi, 50), (50, 1))
y_train = np.sin(X_train)
y_train_before = y_train.copy()
y_test = np.sin(X_test)
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
verbose=False,
)
cmac.train(X_train, y_train, epochs=100)
predicted_test = cmac.predict(X_test)
predicted_test = predicted_test.reshape((len(predicted_test), 1))
error = metrics.mean_absolute_error(y_test, predicted_test)
self.assertAlmostEqual(error, 0.0024, places=4)
# Test that algorithm didn't modify data samples
        np.testing.assert_array_equal(X_train, X_train_before)
np.testing.assert_array_equal(y_train, y_train_before)
self.assertPickledNetwork(cmac, X_train)
def test_train_different_inputs(self):
self.assertInvalidVectorTrain(
network=algorithms.CMAC(),
input_vector=np.array([1, 2, 3]),
target=np.array([1, 2, 3])
)
def test_predict_different_inputs(self):
cmac = algorithms.CMAC()
data = np.array([[1, 2, 3]]).T
target = np.array([[1, 2, 3]]).T
cmac.train(data, target, epochs=100)
self.assertInvalidVectorPred(
network=cmac,
input_vector=np.array([1, 2, 3]),
target=target,
decimal=2
)
def test_cmac_multi_output(self):
X_train = np.linspace(0, 2 * np.pi, 100)
X_train = np.vstack([X_train, X_train])
X_test = np.linspace(0, 2 * np.pi, 100)
X_test = np.vstack([X_test, X_test])
y_train = np.sin(X_train)
y_test = np.sin(X_test)
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
)
cmac.train(X_train, y_train,
X_test, y_test, epochs=100)
predicted_test = cmac.predict(X_test)
error = metrics.mean_absolute_error(y_test, predicted_test)
self.assertAlmostEqual(error, 0, places=6)
def test_cmac_training_exceptions(self):
cmac = algorithms.CMAC(
quantization=100,
associative_unit_size=32,
step=0.2,
)
with self.assertRaises(ValueError):
cmac.train(X_train=True, y_train=True,
X_test=None, y_test=True)
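# Illustrative sketch (not part of the test suite): the minimal CMAC workflow
# the tests above exercise -- column-shaped inputs, train, then predict.
# Hyperparameter values below are placeholders.
def _example_cmac_workflow():  # pragma: no cover - documentation only
    x = np.reshape(np.linspace(0, np.pi, 30), (30, 1))
    y = np.sin(x)
    cmac = algorithms.CMAC(quantization=50, associative_unit_size=16, step=0.2)
    cmac.train(x, y, epochs=50)
    return cmac.predict(x)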
|
src/azure-cli/azure/cli/command_modules/policyinsights/_completers.py | YuanyuanNi/azure-cli | 3,287 | 3408 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.decorators import Completer
from azure.cli.core.commands.client_factory import get_subscription_id
from ._client_factory import cf_policy_insights
@Completer
def get_policy_remediation_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
client = cf_policy_insights(cmd.cli_ctx)
sub = get_subscription_id(cmd.cli_ctx)
rg = getattr(namespace, 'resource_group_name', None)
management_group = getattr(namespace, 'management_group_name', None)
if rg:
result = client.remediations.list_for_resource_group(subscription_id=sub, resource_group_name=rg)
elif management_group:
result = client.remediations.list_for_management_group(management_group_id=management_group)
else:
result = client.remediations.list_for_subscription(subscription_id=sub)
return [i.name for i in result]
@Completer
def get_policy_metadata_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument
client = cf_policy_insights(cmd.cli_ctx).policy_metadata
from azure.mgmt.policyinsights.models import QueryOptions
query_options = QueryOptions(top=2000)
return [metadata.name for metadata in client.list(query_options) if metadata.name.startswith(prefix)]
|
hordak/migrations/0011_auto_20170225_2222.py | CodeBrew-LTD/django-hordak | 187 | 3409 | <reponame>CodeBrew-LTD/django-hordak
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-25 22:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import django_smalluuid.models
class Migration(migrations.Migration):
dependencies = [("hordak", "0010_auto_20161216_1202")]
operations = [
migrations.CreateModel(
name="TransactionImport",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"uuid",
django_smalluuid.models.SmallUUIDField(
default=django_smalluuid.models.UUIDDefault(), editable=False, unique=True
),
),
(
"timestamp",
models.DateTimeField(default=django.utils.timezone.now, editable=False),
),
(
"has_headings",
models.BooleanField(
default=True, verbose_name="First line of file contains headings"
),
),
(
"file",
models.FileField(
upload_to="transaction_imports", verbose_name="CSV file to import"
),
),
(
"state",
models.CharField(
choices=[
("pending", "Pending"),
("uploaded", "Uploaded, ready to import"),
("done", "Import complete"),
],
default="pending",
max_length=20,
),
),
(
"date_format",
models.CharField(
choices=[
("%d-%m-%Y", "dd-mm-yyyy"),
("%d/%m/%Y", "dd/mm/yyyy"),
("%d.%m.%Y", "dd.mm.yyyy"),
("%d-%Y-%m", "dd-yyyy-mm"),
("%d/%Y/%m", "dd/yyyy/mm"),
("%d.%Y.%m", "dd.yyyy.mm"),
("%m-%d-%Y", "mm-dd-yyyy"),
("%m/%d/%Y", "mm/dd/yyyy"),
("%m.%d.%Y", "mm.dd.yyyy"),
("%m-%Y-%d", "mm-yyyy-dd"),
("%m/%Y/%d", "mm/yyyy/dd"),
("%m.%Y.%d", "mm.yyyy.dd"),
("%Y-%d-%m", "yyyy-dd-mm"),
("%Y/%d/%m", "yyyy/dd/mm"),
("%Y.%d.%m", "yyyy.dd.mm"),
("%Y-%m-%d", "yyyy-mm-dd"),
("%Y/%m/%d", "yyyy/mm/dd"),
("%Y.%m.%d", "yyyy.mm.dd"),
("%d-%m-%y", "dd-mm-yy"),
("%d/%m/%y", "dd/mm/yy"),
("%d.%m.%y", "dd.mm.yy"),
("%d-%y-%m", "dd-yy-mm"),
("%d/%y/%m", "dd/yy/mm"),
("%d.%y.%m", "dd.yy.mm"),
("%m-%d-%y", "mm-dd-yy"),
("%m/%d/%y", "mm/dd/yy"),
("%m.%d.%y", "mm.dd.yy"),
("%m-%y-%d", "mm-yy-dd"),
("%m/%y/%d", "mm/yy/dd"),
("%m.%y.%d", "mm.yy.dd"),
("%y-%d-%m", "yy-dd-mm"),
("%y/%d/%m", "yy/dd/mm"),
("%y.%d.%m", "yy.dd.mm"),
("%y-%m-%d", "yy-mm-dd"),
("%y/%m/%d", "yy/mm/dd"),
("%y.%m.%d", "yy.mm.dd"),
],
default="%d-%m-%Y",
max_length=50,
),
),
(
"hordak_import",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="hordak.StatementImport"
),
),
],
),
migrations.CreateModel(
name="TransactionImportColumn",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("column_number", models.PositiveSmallIntegerField()),
(
"column_heading",
models.CharField(blank=True, default="", max_length=100, verbose_name="Column"),
),
(
"to_field",
models.CharField(
blank=True,
choices=[
(None, "-- Do not import --"),
("date", "Date"),
("amount", "Amount"),
("amount_out", "Amount (money in only)"),
("amount_in", "Amount (money out only)"),
("description", "Description / Notes"),
],
default=None,
max_length=20,
null=True,
verbose_name="Is",
),
),
("example", models.CharField(blank=True, default="", max_length=200)),
(
"transaction_import",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="columns",
to="hordak.TransactionImport",
),
),
],
options={"ordering": ["transaction_import", "column_number"]},
),
migrations.AlterUniqueTogether(
name="transactionimportcolumn",
unique_together=set(
[("transaction_import", "column_number"), ("transaction_import", "to_field")]
),
),
]
|
tests/test_channel.py | rwilhelm/aiormq | 176 | 3426 | import asyncio
import uuid
import pytest
from aiomisc_pytest.pytest_plugin import TCPProxy
import aiormq
async def test_simple(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"foo",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b"foo"
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
async def test_blank_body(amqp_channel: aiormq.Channel):
await amqp_channel.basic_qos(prefetch_count=1)
assert amqp_channel.number
queue = asyncio.Queue()
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
consume_ok = await amqp_channel.basic_consume(deaclare_ok.queue, queue.put)
await amqp_channel.basic_publish(
b"",
routing_key=deaclare_ok.queue,
properties=aiormq.spec.Basic.Properties(message_id="123"),
)
message = await queue.get() # type: DeliveredMessage
assert message.body == b""
cancel_ok = await amqp_channel.basic_cancel(consume_ok.consumer_tag)
assert cancel_ok.consumer_tag == consume_ok.consumer_tag
assert cancel_ok.consumer_tag not in amqp_channel.consumers
await amqp_channel.queue_delete(deaclare_ok.queue)
deaclare_ok = await amqp_channel.queue_declare(auto_delete=True)
await amqp_channel.basic_publish(b"foo bar", routing_key=deaclare_ok.queue)
message = await amqp_channel.basic_get(deaclare_ok.queue, no_ack=True)
assert message.body == b"foo bar"
@pytest.mark.no_catch_loop_exceptions
async def test_bad_consumer(amqp_channel: aiormq.Channel, loop):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare()
future = loop.create_future()
await channel.basic_publish(b"urgent", routing_key=declare_ok.queue)
consumer_tag = loop.create_future()
async def bad_consumer(message):
await channel.basic_cancel(await consumer_tag)
future.set_result(message)
raise Exception
consume_ok = await channel.basic_consume(
declare_ok.queue, bad_consumer, no_ack=False,
)
consumer_tag.set_result(consume_ok.consumer_tag)
message = await future
await channel.basic_reject(message.delivery.delivery_tag, requeue=True)
assert message.body == b"urgent"
future = loop.create_future()
await channel.basic_consume(
declare_ok.queue, future.set_result, no_ack=True,
)
message = await future
assert message.body == b"urgent"
async def test_ack_nack_reject(amqp_channel: aiormq.Channel):
channel = amqp_channel # type: aiormq.Channel
await channel.basic_qos(prefetch_count=1)
declare_ok = await channel.queue_declare(auto_delete=True)
queue = asyncio.Queue()
await channel.basic_consume(declare_ok.queue, queue.put, no_ack=False)
await channel.basic_publish(b"rejected", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"rejected"
await channel.basic_reject(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"nacked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"nacked"
await channel.basic_nack(message.delivery.delivery_tag, requeue=False)
await channel.basic_publish(b"acked", routing_key=declare_ok.queue)
message = await queue.get()
assert message.body == b"acked"
await channel.basic_ack(message.delivery.delivery_tag)
async def test_confirm_multiple(amqp_channel: aiormq.Channel):
"""
RabbitMQ has been observed to send confirmations in an unusual pattern when
messages are published simultaneously and only some of them are routed to a
queue: it acks like 1 2 4 5 (where 5 is a "multiple" ack that also confirms 3).
This test is probably inconsequential without publisher_confirms
This is a regression for https://github.com/mosquito/aiormq/issues/10
"""
channel = amqp_channel # type: aiormq.Channel
exchange = uuid.uuid4().hex
await channel.exchange_declare(exchange, exchange_type="topic")
try:
declare_ok = await channel.queue_declare(exclusive=True)
await channel.queue_bind(
declare_ok.queue, exchange, routing_key="test.5",
)
for i in range(10):
messages = [
asyncio.ensure_future(channel.basic_publish(
b"test", exchange=exchange, routing_key="test.{}".format(i),
))
for i in range(10)
]
_, pending = await asyncio.wait(messages, timeout=0.2)
assert not pending, "not all publishes were completed (confirmed)"
await asyncio.sleep(0.05)
finally:
await channel.exchange_delete(exchange)
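# Illustrative sketch (added, not part of the original test suite): with
# publisher confirms, each basic_publish() coroutine resolves only once the
# broker has confirmed it (possibly via a single "multiple" ack covering
# several deliveries), so a whole batch can simply be awaited together. The
# helper name and timeout default are assumptions for illustration; only the
# basic_publish() call shape is taken from the test above.
async def _publish_batch_with_confirms(channel, exchange, count, timeout=1.0):
    futures = [
        channel.basic_publish(
            b"test", exchange=exchange, routing_key="test.{}".format(i),
        )
        for i in range(count)
    ]
    # Raises asyncio.TimeoutError if any confirmation is still outstanding.
    await asyncio.wait_for(asyncio.gather(*futures), timeout=timeout)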
async def test_exclusive_queue_locked(amqp_connection):
channel0 = await amqp_connection.channel()
channel1 = await amqp_connection.channel()
qname = str(uuid.uuid4())
await channel0.queue_declare(qname, exclusive=True)
try:
await channel0.basic_consume(qname, print, exclusive=True)
with pytest.raises(aiormq.exceptions.ChannelLockedResource):
await channel1.queue_declare(qname)
await channel1.basic_consume(qname, print, exclusive=True)
finally:
await channel0.queue_delete(qname)
async def test_remove_writer_when_closed(amqp_channel: aiormq.Channel):
with pytest.raises(aiormq.exceptions.ChannelClosed):
await amqp_channel.queue_declare(
"amq.forbidden_queue_name", auto_delete=True,
)
with pytest.raises(aiormq.exceptions.ChannelInvalidStateError):
await amqp_channel.queue_delete("amq.forbidden_queue_name")
async def test_proxy_connection(proxy_connection, proxy: TCPProxy):
channel = await proxy_connection.channel() # type: aiormq.Channel
await channel.queue_declare(auto_delete=True)
async def test_declare_queue_timeout(proxy_connection, proxy: TCPProxy):
for _ in range(3):
channel = await proxy_connection.channel() # type: aiormq.Channel
qname = str(uuid.uuid4())
with proxy.slowdown(read_delay=5, write_delay=0):
with pytest.raises(asyncio.TimeoutError):
await channel.queue_declare(
qname, auto_delete=True, timeout=0.5
)
|
plaso/parsers/winreg_plugins/ccleaner.py | pyllyukko/plaso | 1,253 | 3429 | # -*- coding: utf-8 -*-
"""Parser for the CCleaner Registry key."""
import re
from dfdatetime import time_elements as dfdatetime_time_elements
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import winreg_parser
from plaso.parsers.winreg_plugins import interface
class CCleanerConfigurationEventData(events.EventData):
"""CCleaner configuration event data.
Attributes:
configuration (str): CCleaner configuration.
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:configuration'
def __init__(self):
"""Initializes event data."""
super(CCleanerConfigurationEventData, self).__init__(
data_type=self.DATA_TYPE)
self.configuration = None
self.key_path = None
class CCleanerUpdateEventData(events.EventData):
"""CCleaner update event data.
Attributes:
key_path (str): Windows Registry key path.
"""
DATA_TYPE = 'ccleaner:update'
def __init__(self):
"""Initializes event data."""
super(CCleanerUpdateEventData, self).__init__(data_type=self.DATA_TYPE)
self.key_path = None
class CCleanerPlugin(interface.WindowsRegistryPlugin):
"""Gathers the CCleaner Keys for NTUSER hive.
Known Windows Registry values within the CCleaner key:
* (App)Cookies [REG_SZ], contains "True" if the cookies should be cleaned;
* (App)Delete Index.dat files [REG_SZ]
* (App)History [REG_SZ]
* (App)Last Download Location [REG_SZ]
* (App)Other Explorer MRUs [REG_SZ]
* (App)Recent Documents [REG_SZ]
* (App)Recently Typed URLs [REG_SZ]
* (App)Run (in Start Menu) [REG_SZ]
* (App)Temporary Internet Files [REG_SZ]
* (App)Thumbnail Cache [REG_SZ]
* CookiesToSave [REG_SZ]
* UpdateKey [REG_SZ], contains a date and time formatted as:
"MM/DD/YYYY hh:mm:ss [A|P]M", for example "07/13/2013 10:03:14 AM";
* WINDOW_HEIGHT [REG_SZ], contains the window height in pixels;
* WINDOW_LEFT [REG_SZ]
* WINDOW_MAX [REG_SZ]
* WINDOW_TOP [REG_SZ]
* WINDOW_WIDTH [REG_SZ], contains the window width in pixels;
Also see:
http://cheeky4n6monkey.blogspot.com/2012/02/writing-ccleaner-regripper-plugin-part_05.html
"""
NAME = 'ccleaner'
DATA_FORMAT = 'CCleaner Registry data'
FILTERS = frozenset([
interface.WindowsRegistryKeyPathFilter(
'HKEY_CURRENT_USER\\Software\\Piriform\\CCleaner')])
# Date and time string formatted as: "MM/DD/YYYY hh:mm:ss [A|P]M"
# for example "07/13/2013 10:03:14 AM"
# TODO: determine if this is true for other locales.
_UPDATE_DATE_TIME_RE = re.compile(
r'([0-9][0-9])/([0-9][0-9])/([0-9][0-9][0-9][0-9]) '
r'([0-9][0-9]):([0-9][0-9]):([0-9][0-9]) ([A|P]M)')
def _ParseUpdateKeyValue(self, parser_mediator, registry_value):
"""Parses the UpdateKey value.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_value (dfwinreg.WinRegistryValue): Windows Registry value.
Returns:
dfdatetime_time_elements.TimeElements: date and time value or None
if not available.
"""
if not registry_value.DataIsString():
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data type: {0:s}'.format(
registry_value.data_type_string))
return None
date_time_string = registry_value.GetDataAsObject()
if not date_time_string:
parser_mediator.ProduceExtractionWarning('missing UpdateKey value data')
return None
re_match = self._UPDATE_DATE_TIME_RE.match(date_time_string)
if not re_match:
parser_mediator.ProduceExtractionWarning(
'unsupported UpdateKey value data: {0!s}'.format(date_time_string))
return None
month, day_of_month, year, hours, minutes, seconds, part_of_day = (
re_match.groups())
try:
year = int(year, 10)
month = int(month, 10)
day_of_month = int(day_of_month, 10)
hours = int(hours, 10)
minutes = int(minutes, 10)
seconds = int(seconds, 10)
except (TypeError, ValueError):
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(date_time_string))
return None
if part_of_day == 'PM':
hours += 12
time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)
try:
date_time = dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
date_time.is_local_time = True
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid UpdateKey date time value: {0!s}'.format(
time_elements_tuple))
return None
return date_time
def ExtractEvents(self, parser_mediator, registry_key, **kwargs):
"""Extracts events from a Windows Registry key.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
"""
configuration = []
date_time = None
for registry_value in registry_key.GetValues():
if not registry_value.name or not registry_value.data:
continue
if registry_value.name == 'UpdateKey':
date_time = self._ParseUpdateKeyValue(parser_mediator, registry_value)
else:
value = registry_value.GetDataAsObject()
configuration.append('{0:s}: {1!s}'.format(registry_value.name, value))
if date_time:
event_data = CCleanerUpdateEventData()
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_UPDATE,
time_zone=parser_mediator.timezone)
parser_mediator.ProduceEventWithEventData(event, event_data)
event_data = CCleanerConfigurationEventData()
event_data.configuration = ' '.join(sorted(configuration)) or None
event_data.key_path = registry_key.path
event = time_events.DateTimeValuesEvent(
registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data)
winreg_parser.WinRegistryParser.RegisterPlugin(CCleanerPlugin)
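# Illustrative sketch (added, not part of the plugin): how the example
# UpdateKey string from the class docstring maps onto _UPDATE_DATE_TIME_RE.
# The group order is month, day of month, year, hours, minutes, seconds and
# the AM/PM marker, matching the unpacking done in _ParseUpdateKeyValue.
def _example_update_key_groups():
    match = CCleanerPlugin._UPDATE_DATE_TIME_RE.match('07/13/2013 10:03:14 AM')
    # match.groups() == ('07', '13', '2013', '10', '03', '14', 'AM')
    return match.groups()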
|
language/labs/drkit/evaluate.py | Xtuden-com/language | 1,199 | 3441 | <gh_stars>1000+
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluate lazy slot filling results."""
import codecs
import collections
import gzip
import json
import random
import re
import string
import unicodedata
from absl import app
from absl import flags
from bert import tokenization
from language.labs.drkit import input_fns
import numpy as np
import tensorflow.compat.v1 as tf
PUNCTUATION = frozenset(string.punctuation)
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string("ground_truth_file", None,
"File with ground truth answers.")
flags.DEFINE_string("predicted_answers_file", None,
"File with predicted answers from model.")
flags.DEFINE_string("relation_counts_file", None,
"JSON file with relation counts.")
class NumpyEncoder(json.JSONEncoder):
"""Special json encoder for numpy types."""
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8, np.int16, np.int32,
np.int64, np.uint8, np.uint16, np.uint32, np.uint64)):
return int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
return float(obj)
elif isinstance(obj, (np.ndarray,)): # This is the fix
return obj.tolist()
return json.JSONEncoder.default(self, obj)
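# Illustrative usage (added, not part of the original module): NumpyEncoder
# lets json.dumps serialize numpy scalars and arrays that the default encoder
# rejects. The values below are made up for the sketch.
def _numpy_encoder_example():
    # Produces '{"score": 0.5, "ranks": [0, 1, 2]}'.
    return json.dumps(
        {"score": np.float32(0.5), "ranks": np.arange(3)}, cls=NumpyEncoder)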
def wikimovie_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_ques = {ex.qas_id: ex.question_text for ex in dataset.examples}
gt_entity = {ex.qas_id: ex.subject_entity[0] for ex in dataset.examples}
inf_chain = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
chain2stats = {ch: [0., 0.] for ch in inf_chain.values()}
incorrect_results, correct_results = [], []
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction in gt_answer[qas_id]:
num_correct += 1
chain2stats[inf_chain[qas_id]][0] += 1
correct_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
correct_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
else:
incorrect_results.append({
"qas_id": result["qas_ids"],
"question": gt_ques[qas_id],
"answers": gt_answer[qas_id],
"subject": gt_entity[qas_id],
"inf-chain": inf_chain[qas_id],
"predictions": result["predictions"],
})
for hop in range(3):
if "sparse_%d" % hop in result:
incorrect_results[-1].update({
"sparse_%d" % hop: result["sparse_%d" % hop],
"dense_%d" % hop: result["dense_%d" % hop],
"mention_%d" % hop: result["mention_%d" % hop],
"entity_%d" % hop: result["entity_%d" % hop],
"sparse_scores_%d" % hop: result["sparse_scores_%d" % hop],
"dense_scores_%d" % hop: result["dense_scores_%d" % hop],
"mention_scores_%d" % hop: result["mention_scores_%d" % hop],
"entity_scores_%d" % hop: result["entity_scores_%d" % hop],
})
chain2stats[inf_chain[qas_id]][1] += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
json.dump(
random.sample(incorrect_results, 100),
tf.gfile.Open(output_prediction_file + ".incorrect", "w"),
cls=NumpyEncoder)
json.dump(
random.sample(correct_results, 100),
tf.gfile.Open(output_prediction_file + ".correct", "w"),
cls=NumpyEncoder)
# Return metrics.
metrics = {
"accuracy": accuracy,
}
for ch, stats in chain2stats.items():
metrics["inference-chains-acc/" + ch] = stats[0] / stats[1]
return metrics
def multihop_eval_fn(dataset,
results,
name_map,
output_prediction_file,
supervision="mention",
**kwargs):
"""Compute evaluation metrics for OneHopDataset or TwoHopDataset.
Args:
dataset: An object of type OneHopDataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
supervision: Type of supervision used in the model.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_mentions = {ex.qas_id: ex.answer_mention[0] for ex in dataset.examples}
if supervision == "mention":
gt_answer = gt_mentions
else:
gt_answer = {ex.qas_id: ex.answer_entity[0] for ex in dataset.examples}
# Compute basic metrics.
num_correct = 0.
all_predictions = {}
for result in results:
qas_id = result["qas_ids"]
prediction = result["predictions"]
if prediction == gt_answer[qas_id]:
num_correct += 1
all_predictions[qas_id] = name_map[str(prediction)]
accuracy = num_correct / len(all_predictions)
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
micro, macro, _, _ = compute_scores(dataset.gt_file, output_prediction_file)
# Return metrics.
metrics = {
"accuracy": accuracy,
"micro-p": micro[0],
"micro-r": micro[1],
"micro-f": micro[2],
"macro-p": macro[0],
"macro-r": macro[1],
"macro-f": macro[2],
}
return metrics
def hotpot_eval_fn(dataset, results, name_map, output_prediction_file,
**kwargs):
"""Compute evaluation metrics for HotpotQADataset.
Args:
dataset: An object of type HotpotQADataset.
results: A list of result dicts from running estimator.predict.
name_map: A mapping from prediction indices to text strings.
output_prediction_file: File to store predictions to.
**kwargs: Variable keyword arguments.
Returns:
metrics: A dict mapping metric names to values.
"""
del kwargs
# Collect ground truth answers.
gt_answer = {ex.qas_id: ex.answer_entity for ex in dataset.examples}
gt_types = {ex.qas_id: ex.inference_chain for ex in dataset.examples}
# Compute basic metrics.
num_correct = {2: 0., 5: 0., 10: 0., 20: 0.}
aps = []
no_answer = 0.
all_predictions = {}
bridge_acc, comp_acc = 0., 0.
bridge_tot, comp_tot = 0, 0
single_acc = 0.
layer_weights = np.zeros_like(results[0]["layer_probs"])
num_layer_entities = {i: 0. for i in range(layer_weights.shape[0])}
num_new_entities = {i: 0. for i in range(layer_weights.shape[0])}
for result in results:
qas_id = result["qas_ids"].decode("utf-8")
preds = result["top_idx"]
scores = result["top_vals"]
ans = gt_answer[qas_id]
my_type = gt_types[qas_id]
if my_type == "bridge":
bridge_tot += 1
else:
comp_tot += 1
ranks = np.where(np.in1d(preds, ans))[0]
ranks = np.sort(ranks)
ap = 0.
cnt = 0.
if any(rr < 10 for rr in ranks):
single_acc += 1
if ranks.shape[0] == 0:
no_answer += 1
for rr in ranks:
cnt += 1
ap += cnt / (rr + 1)
if ans:
aps.append(ap / len(ans))
else:
aps.append(0.)
found = False
for key in [2, 5, 10, 20]:
if found or np.in1d(ans, preds[:key]).all():
num_correct[key] += 1
found = True
if key == 10:
if my_type == "bridge":
bridge_acc += 1
else:
comp_acc += 1
# Non-accuracy stats
layer_weights += result["layer_probs"]
layer_entities = {i: set() for i in range(layer_weights.shape[0])}
all_predictions[qas_id] = {}
for i in range(layer_weights.shape[0]):
layer_entities[i] = set(
[ee for ee in result["layer_%d_ent" % i] if ee != -1])
num_layer_entities[i] += len(layer_entities[i])
num_new_entities[i] += len(layer_entities[i] - layer_entities[0])
# all_predictions[qas_id]["layer_%d" % i] = [
# name_map[str(ee)] for ee in layer_entities[i]]
all_predictions[qas_id]["predictions"] = [
(name_map[str(pred)], str(scores[i])) for i, pred in enumerate(preds)
]
tf.logging.info("Evaluated %d items", len(all_predictions))
accuracy = {
key: (num_correct[key] / len(all_predictions)) for key in num_correct
}
# Compute advanced metrics.
json.dump(all_predictions, tf.gfile.Open(output_prediction_file, "w"))
# Return metrics.
metrics = {"eval/@%d" % key: accuracy[key] for key in accuracy}
metrics["accuracy"] = accuracy[10]
metrics["eval/map"] = sum(aps) / len(all_predictions)
metrics["eval/bridge_accuracy"] = bridge_acc / bridge_tot
metrics["eval/comparison_accuracy"] = comp_acc / comp_tot
metrics["analysis/single_accuracy"] = single_acc / len(all_predictions)
metrics["analysis/no_answers"] = no_answer / len(all_predictions)
for i in range(layer_weights.shape[0]):
metrics["analysis/layer_weight_%d" %
i] = layer_weights[i] / len(all_predictions)
metrics["analysis/num_entities_%d" %
i] = num_layer_entities[i] / len(all_predictions)
metrics["analysis/num_new_entities_%d" %
i] = num_new_entities[i] / len(all_predictions)
return metrics
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
"""Compute F1 score."""
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = collections.Counter(prediction_tokens) & collections.Counter(
ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
"""Compute EM score."""
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
my_score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(my_score)
return max(scores_for_ground_truths)
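# Illustrative sketch (added, not in the original module): normalize_answer()
# lowercases and strips punctuation and articles before token-level comparison,
# so "The cat, sat!" and "cat sat" are an exact match, while the F1 of
# "the cat sat" against "a cat sat down" is precision 1.0, recall 2/3 -> 0.8.
# The example strings are made up.
def _metric_example():
    em = metric_max_over_ground_truths(
        exact_match_score, "The cat, sat!", ["cat sat", "dog"])  # True
    f1 = metric_max_over_ground_truths(
        f1_score, "the cat sat", ["a cat sat down", "dog"])  # 0.8
    return em, f1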
def read_predictions(prediction_file):
with tf.gfile.Open(prediction_file) as f:
predictions = json.load(f)
return predictions
def read_answers(gold_file):
"""Read ground truth answers."""
answers = {}
f = tf.gfile.Open(gold_file)
if gold_file.endswith(".gz"):
f = gzip.GzipFile(fileobj=f)
for i, line in enumerate(f):
example = json.loads(line)
if i == 0 and "header" in example:
continue
for qa in example["qas"]:
answers[qa["qid"]] = qa["answers"]
f.close()
return answers
def evaluate(answers, predictions, skip_no_answer=False):
"""Compute F1 and EM scores."""
f1 = exact_match = total = 0
for qid, ground_truths in answers.items():
if qid not in predictions:
if not skip_no_answer:
message = "Unanswered question %s will receive score 0." % qid
print(message)
total += 1
continue
total += 1
prediction = predictions[qid]
exact_match += metric_max_over_ground_truths(exact_match_score, prediction,
ground_truths)
f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {"exact_match": exact_match, "f1": f1}
def mrqa_eval_fn(dataset_file, predictions_file, skip_no_answer=True):
answers = read_answers(dataset_file)
predictions = read_predictions(predictions_file)
return evaluate(answers, predictions, skip_no_answer)
def compute_scores(ground_truth_file, predicted_answers_file):
"""Read predictions and ground truth and return P, R, F."""
telemetry, incorrect = read_results(ground_truth_file, predicted_answers_file)
micro = aprf(telemetry)
relationwise = aprf_relationwise(telemetry)
macro = sum([val[0] for _, val in relationwise.items()])
macro = macro / len(relationwise)
return micro, macro, relationwise, incorrect
def read_results(ground_truth_file, predicted_answers_file):
"""Read results and ground truth and return data structure with stats."""
with codecs.getreader("utf-8")(tf.gfile.GFile(ground_truth_file,
"r")) as read:
data_ = {}
for line in read:
item = json.loads(line.strip())
if isinstance(item["relation"], dict):
relation = item["relation"]["wikidata_id"]
elif isinstance(item["relation"], list):
relation = (
item["relation"][0]["wikidata_id"] + "_" +
item["relation"][1]["wikidata_id"])
data_[item["id"]] = [relation, item["subject"]["wikidata_id"]]
if "is_impossible" in item and item["is_impossible"]:
continue
if item["object"] is None:
continue
if isinstance(item["object"]["mention"], dict):
data_[item["id"]] += [item["object"]["mention"]["text"]]
if "name" in item["object"]:
data_[item["id"]] += [item["object"]["name"]]
if "aliases" in item["object"]:
data_[item["id"]] += item["object"]["aliases"].keys()
with codecs.getreader("utf-8")(tf.gfile.GFile(predicted_answers_file,
"r")) as fin:
predictions = json.load(fin)
telemetry, incorrect = [], []
n = 0
for key in data_:
if key not in predictions:
continue
g = data_[key][2:]
a = predictions[key]
m = data_[key][:2]
stats = score(g, a)
telemetry.append([m[0], m[1], g, a, stats])
if stats[0] == 0. and stats[3] > 0.:
incorrect.append(key)
n += 1
return telemetry, incorrect
def aprf_relationwise(g):
"""Returns precision, recall and F score for each relation."""
rel_to_stats = collections.defaultdict(list)
for item in g:
rel_to_stats[item[0]].append(item)
rel_to_scores = {}
for rel, stats in rel_to_stats.items():
rel_to_scores[rel] = [aprf(stats), len(stats)]
return rel_to_scores
def aprf(g):
"""Returns precision, recall and F of the given statistics."""
tp, _, sys_pos, real_pos = sum([x[-1] for x in g])
if tp == 0:
p = r = f = 0.0
else:
p = tp / float(sys_pos) if sys_pos > 0 else 0.
r = tp / float(real_pos) if real_pos > 0 else 0.
f = 2 * p * r / (p + r)
return np.asarray([p, r, f])
def score(gold, answer):
"""Compares answer to ground truth to return TP / FP stats."""
if gold:
gold = set([simplify(g) for g in gold])
answer = simplify(answer)
result = np.zeros(4)
if gold:
result[3] += 1
if answer in gold:
result[0] += 1
else:
if not answer:
result[1] += 1
if answer:
result[2] += 1
return result
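# Added commentary (not in the original): in the result vector above, index 0
# counts true positives (the answer matches a gold string), index 1 counts
# correct "no answer" predictions when there is no gold, index 2 counts system
# positives (any non-empty answer) and index 3 counts real positives (examples
# that have a gold answer); aprf() unpacks these as (tp, _, sys_pos, real_pos).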
def strip_accents_and_punct(text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
if char in PUNCTUATION:
continue
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def simplify(answer):
"""Pre-process answer string."""
toks = []
articles = {"the", "a", "an", "and", ""}
for t in answer.strip().lower().split():
tok = strip_accents_and_punct(t)
if tok not in articles:
toks.append(tok)
return "".join(toks)
def rare_relation_scores(relationwise, relation2counts):
"""Print statistics of rare relations for different thresholds."""
for thresh in [5, 100, 500, 1000]:
freq_stats, freq_total = np.array([0., 0., 0.]), 0
rare_stats, rare_total = np.array([0., 0., 0.]), 0
for relation, (stats, _) in relationwise.items():
if relation2counts.get(relation, 0) < thresh:
rare_stats += stats
rare_total += 1
else:
freq_stats += stats
freq_total += 1
rare_stats /= rare_total
freq_stats /= freq_total
print(
"Threshold =", thresh, "rare", rare_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(rare_stats[0], rare_stats[1], rare_stats[2]), "freq", freq_total,
"Micro-P %.3f Micro-R %.3f Micro-F %.3f" %
(freq_stats[0], freq_stats[1], freq_stats[2]))
def main(_):
eval_type = "hotpot"
if eval_type == "hotpot":
test_hotpot_eval()
else:
micro, macro, rwise, _ = compute_scores(FLAGS.ground_truth_file,
FLAGS.predicted_answers_file)
print("Micro", micro)
print("Macro", macro)
if FLAGS.relation_counts_file is not None:
r2c = json.load(tf.gfile.Open(FLAGS.relation_counts_file))
rare_relation_scores(rwise, r2c)
if __name__ == "__main__":
app.run(main)
|
test/library/draft/DataFrames/psahabu/AddSeries.py | jhh67/chapel | 1,602 | 3447 | <gh_stars>1000+
import pandas as pd
I = ["A", "B", "C", "D", "E"]
oneDigit = pd.Series([1, 2, 3, 4, 5], pd.Index(I))
twoDigit = pd.Series([10, 20, 30, 40, 50], pd.Index(I))
print("addends:")
print(oneDigit)
print(twoDigit)
print()
print("sum:")
print(oneDigit + twoDigit)
print()
I2 = ["A", "B", "C"]
I3 = ["B", "C", "D", "E"]
X = pd.Series([0, 1, 2], pd.Index(I2))
Y = pd.Series([10, 20, 0, 0], pd.Index(I3))
print("addends:")
print(X)
print(Y)
print()
print("sum:")
print(X + Y)
print()
A = pd.Series(["hello ", "my ", "name", "is", "brad"])
B = pd.Series(["world", "real"])
print("addends:")
print(A)
print(B)
print()
print("sum: ")
print(A + B)
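# Note (added commentary, not in the original test): pandas aligns on index
# labels before adding, so X + Y is NaN for labels present in only one Series
# ("A", "D" and "E" here), and A + B concatenates the strings positionally,
# yielding NaN at positions 2-4 where B has no element.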
|
tests/test_provider_Mongey_kafka_connect.py | mjuenema/python-terrascript | 507 | 3451 | # tests/test_provider_Mongey_kafka-connect.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:20:11 UTC)
def test_provider_import():
import terrascript.provider.Mongey.kafka_connect
def test_resource_import():
from terrascript.resource.Mongey.kafka_connect import kafka_connect_connector
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.Mongey.kafka_connect
#
# t = terrascript.provider.Mongey.kafka_connect.kafka_connect()
# s = str(t)
#
# assert 'https://github.com/Mongey/terraform-provider-kafka-connect' in s
# assert '0.2.3' in s
|
tests/test_config_parser.py | KevinMFong/pyhocon | 424 | 3457 | # -*- encoding: utf-8 -*-
import json
import os
import shutil
import tempfile
from collections import OrderedDict
from datetime import timedelta
from pyparsing import ParseBaseException, ParseException, ParseSyntaxException
import mock
import pytest
from pyhocon import (ConfigFactory, ConfigParser, ConfigSubstitutionException, ConfigTree)
from pyhocon.exceptions import (ConfigException, ConfigMissingException,
ConfigWrongTypeException)
try:
from dateutil.relativedelta import relativedelta as period
except Exception:
from datetime import timedelta as period
class TestConfigParser(object):
def test_parse_simple_value(self):
config = ConfigFactory.parse_string(
"""t = {
c = 5
"d" = true
e.y = {
f: 7
g: "hey dude!"
h: hey man
i = \"\"\"
"first line"
"second" line
\"\"\"
}
j = [1, 2, 3]
u = 192.168.1.3/32
g = null
}
"""
)
assert config.get_string('t.c') == '5'
assert config.get_int('t.c') == 5
assert config.get_float('t.c') == 5.0
assert config.get('t.e.y.f') == 7
assert config.get('t.e.y.g') == 'hey dude!'
assert config.get('t.e.y.h') == 'hey man'
assert [v.strip() for v in config.get('t.e.y.i').split('\n')] == ['', '"first line"', '"second" line', '']
assert config.get_bool('t.d') is True
assert config.get_int('t.e.y.f') == 7
assert config.get('t.j') == [1, 2, 3]
assert config.get('t.u') == '192.168.1.3/32'
assert config.get_int('t.g') is None
assert config.get_float('t.g') is None
assert config.get_string('t.g') is None
assert config.get_bool('t.g') is None
assert config.get_list('t.g') is None
assert config.get_config('t.g') is None
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_fail_parse_forbidden_characters(self, forbidden_char):
with pytest.raises(ParseBaseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['$', '"'])
def test_fail_parse_forbidden_characters_in_context(self, forbidden_char):
with pytest.raises(ParseException):
ConfigFactory.parse_string('a: hey man{}'.format(forbidden_char))
@pytest.mark.parametrize('forbidden_char', ['+', '`', '^', '?', '!', '@', '*', '&'])
def test_parse_forbidden_characters_quoted(self, forbidden_char):
value = "hey man{}".format(forbidden_char)
config = ConfigFactory.parse_string('a: "{}"'.format(value))
assert config.get_string("a") == value
def test_parse_with_enclosing_brace(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: 5
}
}
"""
)
assert config.get_string('a.b') == '5'
@pytest.mark.parametrize('data_set', [
('a: 1 minutes', period(minutes=1)),
('a: 1minutes', period(minutes=1)),
('a: 2 minute', period(minutes=2)),
('a: 3 m', period(minutes=3)),
('a: 3m', period(minutes=3)),
('a: 3 min', '3 min'),
('a: 4 seconds', period(seconds=4)),
('a: 5 second', period(seconds=5)),
('a: 6 s', period(seconds=6)),
('a: 6 sec', '6 sec'),
('a: 7 hours', period(hours=7)),
('a: 8 hour', period(hours=8)),
('a: 9 h', period(hours=9)),
('a: 10 weeks', period(weeks=10)),
('a: 11 week', period(weeks=11)),
('a: 12 w', period(weeks=12)),
('a: 10 days', period(days=10)),
('a: 11 day', period(days=11)),
('a: 12 d', period(days=12)),
('a: 110 microseconds', period(microseconds=110)),
('a: 111 microsecond', period(microseconds=111)),
('a: 112 micros', period(microseconds=112)),
('a: 113 micro', period(microseconds=113)),
('a: 114 us', period(microseconds=114)),
('a: 110 milliseconds', timedelta(milliseconds=110)),
('a: 111 millisecond', timedelta(milliseconds=111)),
('a: 112 millis', timedelta(milliseconds=112)),
('a: 113 milli', timedelta(milliseconds=113)),
('a: 114 ms', timedelta(milliseconds=114)),
('a: 110 nanoseconds', period(microseconds=0)),
('a: 11000 nanoseconds', period(microseconds=11)),
('a: 1110000 nanosecond', period(microseconds=1110)),
('a: 1120000 nanos', period(microseconds=1120)),
('a: 1130000 nano', period(microseconds=1130)),
('a: 1140000 ns', period(microseconds=1140)),
])
def test_parse_string_with_duration(self, data_set):
config = ConfigFactory.parse_string(data_set[0])
assert config['a'] == data_set[1]
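# Added commentary (not in the original test): only the unit spellings HOCON
# recognises are converted to period/timedelta values; '3 min' and '6 sec'
# in the parametrization above stay plain strings because 'min' and 'sec'
# are not accepted duration units in this parser.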
def test_parse_string_with_duration_with_long_unit_name(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: 10 weeks
c: bar
"""
)
assert config['b'] == period(weeks=10)
def test_parse_with_list_mixed_types_with_durations_and_trailing_comma(self):
config = ConfigFactory.parse_string(
"""
a: foo
b: [a, 1, 10 weeks, 5 minutes,]
c: bar
"""
)
assert config['b'] == ['a', 1, period(weeks=10), period(minutes=5)]
def test_parse_with_enclosing_square_bracket(self):
config = ConfigFactory.parse_string("[1, 2, 3]")
assert config == [1, 2, 3]
def test_quoted_key_with_dots(self):
config = ConfigFactory.parse_string(
"""
"a.b.c.d": 3
t {
"d": {
"c": 5
}
}
k {
"b.f.d": 7
}
"""
)
assert config['"a.b.c.d"'] == 3
assert config['t.d.c'] == 5
assert config['k."b.f.d"'] == 7
def test_dotted_notation_merge(self):
config = ConfigFactory.parse_string(
"""
a {
b = foo
c = bar
}
a.c = ${a.b}" "${a.b}
a.d = baz
"""
)
assert config['a.b'] == "foo"
assert config['a.c'] == "foo foo"
assert config['a.d'] == "baz"
def test_comma_to_separate_expr(self):
config = ConfigFactory.parse_string(
"""
a=1,
b="abc",
c=the man,
d=woof,
a-b-c-d=test,
a b c d=test2,
"a b c d e"=test3
"""
)
assert config.get('a') == 1
assert config.get('b') == 'abc'
assert config.get('c') == 'the man'
assert config.get('d') == 'woof'
assert config.get('a-b-c-d') == 'test'
assert config.get('a b c d') == 'test2'
assert config.get('a b c d e') == 'test3'
def test_dict_merge(self):
config = ConfigFactory.parse_string(
"""
a {
d {
g.h.j.u: 5
g {
h.d: 4
}
g.h.k: f d
}
h.i.m = 7
h.i {
d: 5
}
h.i {
e:65
}
}
""")
expected_result = {
"a": {
"d": {
"g": {
"h": {
"j": {
"u": 5
},
"d": 4,
"k": "f d"
}
}
},
"h": {
"i": {
"m": 7,
"d": 5,
"e": 65
}
}
}
}
assert expected_result == config
def test_parse_with_comments(self):
config = ConfigFactory.parse_string(
"""
// comment 1
# comment 2
{
c = test // comment 0
g = 6 test # comment 0
# comment 3
a: { # comment 4
b: test, # comment 5
} # comment 6
t = [1, # comment 7
2, # comment 8
3, # comment 9
]
} # comment 10
// comment 11
// comment 12
"""
)
assert config.get('c') == 'test'
assert config.get('g') == '6 test'
assert config.get('a.b') == 'test'
assert config.get_string('a.b') == 'test'
assert config.get('t') == [1, 2, 3]
def test_missing_config(self):
config = ConfigFactory.parse_string(
"""
a = 5
"""
)
# b is not set so show raise an exception
with pytest.raises(ConfigMissingException):
config.get('b')
def test_parse_null(self):
config = ConfigFactory.parse_string(
"""
a = null
b = [null]
"""
)
assert config.get('a') is None
assert config.get('b')[0] is None
def test_parse_override(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
a.b {
c = 7
d = 8
}
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('a.b.d') == 8
def test_concat_dict(self):
config = ConfigFactory.parse_string(
"""
a: {b: 1}
a: {c: 2}
b: {c: 3} {d: 4} {
c: 5
}
"""
)
assert config.get('a.b') == 1
assert config.get('a.c') == 2
assert config.get('b.c') == 5
assert config.get('b.d') == 4
def test_concat_string(self):
config = ConfigFactory.parse_string(
"""
a = a b c
b = 5 b
c = b 7
"""
)
assert config.get('a') == 'a b c'
assert config.get('b') == '5 b'
assert config.get('c') == 'b 7'
def test_concat_list(self):
config = ConfigFactory.parse_string(
"""
a = [1, 2] [3, 4] [
5,
6
]
"""
)
assert config.get('a') == [1, 2, 3, 4, 5, 6]
assert config.get_list('a') == [1, 2, 3, 4, 5, 6]
def test_bad_concat(self):
ConfigFactory.parse_string('a = 45\n')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = [4] "4"')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = "4" [5]')
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string('a = {b: 5} "4"')
def test_string_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = ${a.b.c}
f = ${a.b.e}
}
"""
)
assert config1.get('a.b.c') == 'str'
assert config1.get('d') == 'str'
assert config1.get('f') == 'str '
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c}
f = test ${a.b.e}
}
"""
)
assert config2.get('a.b.c') == 'str'
assert config2.get('d') == 'test str'
assert config2.get('f') == 'test str '
config3 = ConfigFactory.parse_string(
u"""
{
a: {
b: {
c = str
e = "str "
}
}
d = test ${a.b.c} me
f = test ${a.b.e} me
}
"""
)
assert config3.get('a.b.c') == 'str'
assert config3.get('d') == 'test str me'
assert config3.get('f') == 'test str me'
def test_string_substitutions_with_no_space(self):
config = ConfigFactory.parse_string(
"""
app.heap_size = 128
app.java_opts = [
-Xms${app.heap_size}m
-Xmx${app.heap_size}m
]
"""
)
assert config.get('app.java_opts') == [
'-Xms128m',
'-Xmx128m'
]
def test_int_substitutions(self):
config1 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = ${a.b.c}
}
"""
)
assert config1.get('a.b.c') == 5
assert config1.get('d') == 5
config2 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c}
}
"""
)
assert config2.get('a.b.c') == 5
assert config2.get('d') == 'test 5'
config3 = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = 5
}
}
d = test ${a.b.c} me
}
"""
)
assert config3.get('a.b.c') == 5
assert config3.get('d') == 'test 5 me'
def test_cascade_string_substitutions(self):
config = ConfigFactory.parse_string(
"""
{
a: {
b: {
c = ${e}
}
}
d = test ${a.b.c} me
e = 7
}
"""
)
assert config.get('a.b.c') == 7
assert config.get('d') == 'test 7 me'
def test_multiple_substitutions(self):
config = ConfigFactory.parse_string(
"""
a = 5
b=${a}${a}
c=${a} ${a}
"""
)
assert config == {
'a': 5,
'b': '55',
'c': '5 5'
}
def test_dict_substitutions(self):
config = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic} {name = "east"}
"""
)
assert config.get('data-center-east.cluster-size') == 6
assert config.get('data-center-east.name') == 'east'
config2 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
"""
)
assert config2.get('data-center-east.cluster-size') == 6
assert config2.get('data-center-east.name') == 'east'
config3 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic} { cluster-size = 9, opts = "-Xmx4g" }
"""
)
assert config3.get('data-center-east.cluster-size') == 9
assert config3.get('data-center-east.name') == 'east'
assert config3.get('data-center-east.opts') == '-Xmx4g'
config4 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = {name = "east"} ${data-center-generic}
data-center-east-prod = ${data-center-east} {tmpDir=/tmp}
"""
)
assert config4.get('data-center-east.cluster-size') == 6
assert config4.get('data-center-east.name') == 'east'
assert config4.get('data-center-east-prod.cluster-size') == 6
assert config4.get('data-center-east-prod.tmpDir') == '/tmp'
config5 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = ${data-center-generic}
data-center-east = { name = "east" }
"""
)
assert config5['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
config6 = ConfigFactory.parse_string(
"""
data-center-generic = { cluster-size = 6 }
data-center-east = { name = "east" }
data-center-east = ${data-center-generic}
"""
)
assert config6['data-center-east'] == {
'name': 'east',
'cluster-size': 6
}
def test_dos_chars_with_unquoted_string_noeol(self):
config = ConfigFactory.parse_string("foo = bar")
assert config['foo'] == 'bar'
def test_dos_chars_with_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = "5"')
assert config['foo'] == '5'
def test_dos_chars_with_triple_quoted_string_noeol(self):
config = ConfigFactory.parse_string('foo = """5"""')
assert config['foo'] == '5'
def test_dos_chars_with_int_noeol(self):
config = ConfigFactory.parse_string("foo = 5")
assert config['foo'] == 5
def test_dos_chars_with_float_noeol(self):
config = ConfigFactory.parse_string("foo = 5.0")
assert config['foo'] == 5.0
def test_list_substitutions(self):
config = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = ${common_modules} [java]
"""
)
assert config.get('host_modules') == ['php', 'python', 'java']
config2 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules}
"""
)
assert config2.get('host_modules') == ['java', 'php', 'python']
config3 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
"""
)
assert config3.get('common_modules') == ['php', 'python']
assert config3.get('host_modules') == ['java', 'php', 'python', 'perl']
config4 = ConfigFactory.parse_string(
"""
common_modules = [php, python]
host_modules = [java] ${common_modules} [perl]
full_modules = ${host_modules} [c, go]
"""
)
assert config4.get('common_modules') == ['php', 'python']
assert config4.get('host_modules') == ['java', 'php', 'python', 'perl']
assert config4.get('full_modules') == ['java', 'php', 'python', 'perl', 'c', 'go']
def test_list_element_substitution(self):
config = ConfigFactory.parse_string(
"""
main_language = php
languages = [java, ${main_language}]
"""
)
assert config.get('languages') == ['java', 'php']
def test_substitution_list_with_append(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.large-jvm-opts = ["-XX:+UseParNewGC"] [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ["-XX:+UseParNewGC"]
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC',
]
def test_substitution_list_with_append_substitution(self):
config = ConfigFactory.parse_string(
"""
application.foo = 128mm
application.default-jvm-opts = ["-XX:+UseParNewGC"]
application.large-jvm-opts = ${application.default-jvm-opts} [-Xm16g, ${application.foo}]
application.large-jvm-opts2 = [-Xm16g, ${application.foo}] ${application.default-jvm-opts}
""")
assert config["application.large-jvm-opts"] == [
'-XX:+UseParNewGC',
'-Xm16g',
'128mm'
]
assert config["application.large-jvm-opts2"] == [
'-Xm16g',
'128mm',
'-XX:+UseParNewGC'
]
def test_non_existent_substitution(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent}
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = ${non_existent} abc
"""
)
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
common_modules = abc ${non_existent} def
"""
)
def test_non_compatible_substitution(self):
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = 55 ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} 55
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules}
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = ${common_modules} aa
"""
)
with pytest.raises(ConfigWrongTypeException):
ConfigFactory.parse_string(
"""
common_modules = [perl]
host_modules = aa ${common_modules} bb
"""
)
def test_self_ref_substitution_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = ${x} [3,4]
x = [-1, 0] ${x} [5, 6]
x = [-3, -2] ${x}
"""
)
assert config.get("x") == [-3, -2, -1, 0, 1, 2, 3, 4, 5, 6]
def test_self_append_array(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x += [3,4]
"""
)
assert config.get("x") == [1, 2, 3, 4]
def test_self_append_string(self):
'''
Should be equivalent to
x = abc
x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x = abc
x += def
"""
)
assert config.get("x") == "abc def"
def test_self_append_non_existent_string(self):
'''
Should be equivalent to x = ${?x} def
'''
config = ConfigFactory.parse_string(
"""
x += def
"""
)
assert config.get("x") == " def"
def test_self_append_nonexistent_array(self):
config = ConfigFactory.parse_string(
"""
x += [1,2]
"""
)
assert config.get("x") == [1, 2]
def test_self_append_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1}
x += {b: 2}
"""
)
assert config.get("x") == {'a': 1, 'b': 2}
def test_self_append_nonexistent_object(self):
config = ConfigFactory.parse_string(
"""
x += {a: 1}
"""
)
assert config.get("x") == {'a': 1}
def test_self_ref_substitution_array_to_dict(self):
config = ConfigFactory.parse_string(
"""
x = [1,2]
x = {x: [3,4]}
x = {y: [5,6]}
x = {z: ${x}}
"""
)
assert config.get("x.x") == [3, 4]
assert config.get("x.y") == [5, 6]
assert config.get("x.z") == {'x': [3, 4], 'y': [5, 6]}
def test_self_ref_substitiotion_dict_in_array(self):
config = ConfigFactory.parse_string(
"""
x = {x: [3,4]}
x = [${x}, 2, 3]
"""
)
(one, two, three) = config.get("x")
assert one == {'x': [3, 4]}
assert two == 2
assert three == 3
def test_self_ref_substitution_dict_path(self):
config = ConfigFactory.parse_string(
"""
x = {y: {z: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == {'z': 1}
assert config.get("x.z") == 1
assert set(config.get("x").keys()) == set(['y', 'z'])
def test_self_ref_substitution_dict_path_hide(self):
config = ConfigFactory.parse_string(
"""
x = {y: {y: 1}}
x = ${x.y}
"""
)
assert config.get("x.y") == 1
assert set(config.get("x").keys()) == set(['y'])
def test_self_ref_substitution_dict_recurse(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
"""
)
def test_self_ref_substitution_dict_recurse2(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x}
x = ${x}
"""
)
def test_self_ref_substitution_dict_merge(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
foo : { a : { c : 1 } }
foo : ${foo.a}
foo : { a : 2 }
"""
)
assert config.get('foo') == {'a': 2, 'c': 1}
assert set(config.keys()) == set(['foo'])
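# Added commentary (not in the original test): per the HOCON spec a
# self-referential substitution only "looks backwards", so ${foo.a} resolves
# to {c: 1} from the first line and the later {a: 2} object is merged on top,
# which is why foo ends up as {'a': 2, 'c': 1}.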
def test_self_ref_substitution_dict_otherfield(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
"""
)
assert config.get("bar") == {'foo': 42, 'baz': 42}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
bar : {
foo : 42,
baz : ${bar.foo}
}
bar : { foo : 43 }
"""
)
assert config.get("bar") == {'foo': 43, 'baz': 43}
assert set(config.keys()) == set(['bar'])
def test_self_ref_substitution_dict_otherfield_merged_in_mutual(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
// bar.a should end up as 4
bar : { a : ${foo.d}, b : 1 }
bar.b = 3
// foo.c should end up as 3
foo : { c : ${bar.b}, d : 2 }
foo.d = 4
"""
)
assert config.get("bar") == {'a': 4, 'b': 3}
assert config.get("foo") == {'c': 3, 'd': 4}
assert set(config.keys()) == set(['bar', 'foo'])
def test_self_ref_substitution_string_opt_concat(self):
'''
Example from HOCON spec
'''
config = ConfigFactory.parse_string(
"""
a = ${?a}foo
"""
)
assert config.get("a") == 'foo'
assert set(config.keys()) == set(['a'])
def test_self_ref_substitution_dict_recurse_part(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
x = ${x} {y: 1}
x = ${x.y}
"""
)
def test_self_ref_substitution_object(self):
config = ConfigFactory.parse_string(
"""
x = {a: 1, b: 2}
x = ${x} {c: 3}
x = {z: 0} ${x}
x = {y: -1} ${x} {d: 4}
"""
)
assert config.get("x") == {'a': 1, 'b': 2, 'c': 3, 'z': 0, 'y': -1, 'd': 4}
def test_self_ref_child(self):
config = ConfigFactory.parse_string(
"""
a.b = 3
a.b = ${a.b}
a.b = ${a.b}
a.c = [1,2]
a.c = ${a.c}
a.d = {foo: bar}
a.d = ${a.d}
"""
)
assert config.get("a") == {'b': 3, 'c': [1, 2], 'd': {'foo': 'bar'}}
def test_concat_multi_line_string(self):
config = ConfigFactory.parse_string(
"""
common_modules = perl \
java \
python
"""
)
assert [x.strip() for x in config['common_modules'].split() if x.strip(' ') != ''] == ['perl', 'java', 'python']
def test_concat_multi_line_list(self):
config = ConfigFactory.parse_string(
"""
common_modules = [perl] \
[java] \
[python]
"""
)
assert config['common_modules'] == ['perl', 'java', 'python']
def test_concat_multi_line_dict(self):
config = ConfigFactory.parse_string(
"""
common_modules = {a:perl} \
{b:java} \
{c:python}
"""
)
assert config['common_modules'] == {'a': 'perl', 'b': 'java', 'c': 'python'}
def test_parse_URL_from_samples(self):
config = ConfigFactory.parse_URL("file:samples/aws.conf")
assert config.get('data-center-generic.cluster-size') == 6
assert config.get('large-jvm-opts') == ['-XX:+UseParNewGC', '-Xm16g']
def test_parse_URL_from_invalid(self):
config = ConfigFactory.parse_URL("https://nosuchurl")
assert config == []
def test_include_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/animals.conf")
assert config.get('cat.garfield.say') == 'meow'
assert config.get('dog.mutt.hates.garfield.say') == 'meow'
def test_include_glob_dict_from_samples(self):
config = ConfigFactory.parse_file("samples/all_animals.conf")
assert config.get('animals.garfield.say') == 'meow'
assert config.get('animals.mutt.hates.garfield.say') == 'meow'
def test_include_glob_list_from_samples(self):
config = ConfigFactory.parse_file("samples/all_bars.conf")
bars = config.get_list('bars')
assert len(bars) == 10
names = {bar['name'] for bar in bars}
types = {bar['type'] for bar in bars if 'type' in bar}
print(types, '(((((')
assert '<NAME>' in names
assert 'Homer\'s favorite coffee' in names
assert 'milk' in types
def test_list_of_dicts(self):
config = ConfigFactory.parse_string(
"""
a: [
{a: 1, b: 2},
{a: 3, c: 4},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2},
{'a': 3, 'c': 4}
]
def test_list_of_lists(self):
config = ConfigFactory.parse_string(
"""
a: [
[1, 2]
[3, 4]
]
"""
)
assert config['a'] == [
[1, 2],
[3, 4]
]
def test_list_of_dicts_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = {f: 4}
a: [
${b} {a: 1, b: 2},
{a: 3, c: 4} ${b},
{a: 3} ${b} {c: 6},
]
"""
)
assert config['a'] == [
{'a': 1, 'b': 2, 'f': 4},
{'a': 3, 'c': 4, 'f': 4},
{'a': 3, 'c': 6, 'f': 4}
]
def test_list_of_lists_with_merge(self):
config = ConfigFactory.parse_string(
"""
b = [5, 6]
a: [
${b} [1, 2]
[3, 4] ${b}
[1, 2] ${b} [7, 8]
]
"""
)
assert config['a'] == [
[5, 6, 1, 2],
[3, 4, 5, 6],
[1, 2, 5, 6, 7, 8]
]
def test_invalid_assignment(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('common_modules [perl]')
with pytest.raises(ParseException):
ConfigFactory.parse_string('common_modules {} {perl: 1}')
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {f: 5}
common_modules ${a} {perl: 1}
""")
def test_invalid_dict(self):
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string(
"""
a = {
f: 5
g
}
""")
with pytest.raises(ParseSyntaxException):
ConfigFactory.parse_string('a = {g}')
def test_include_file(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('[1, 2]')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: [
include "{tmp_file}"
]
""".format(tmp_file=fdin.name)
)
assert config1['a'] == [1, 2]
config2 = ConfigFactory.parse_string(
"""
a: [
include file("{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config2['a'] == [1, 2]
config3 = ConfigFactory.parse_string(
"""
a: [
include url("file://{tmp_file}")
]
""".format(tmp_file=fdin.name)
)
assert config3['a'] == [1, 2]
def test_include_missing_file(self):
config1 = ConfigFactory.parse_string(
"""
a: [
include "dummy.txt"
3
4
]
"""
)
assert config1['a'] == [3, 4]
def test_include_required_file(self):
config = ConfigFactory.parse_string(
"""
a {
include required("samples/animals.d/cat.conf")
t = 2
}
"""
)
expected = {
'a': {
'garfield': {
'say': 'meow'
},
't': 2
}
}
assert expected == config
config2 = ConfigFactory.parse_string(
"""
a {
include required(file("samples/animals.d/cat.conf"))
t = 2
}
"""
)
assert expected == config2
def test_include_missing_required_file(self):
with pytest.raises(IOError):
ConfigFactory.parse_string(
"""
a: [
include required("dummy.txt")
3
4
]
"""
)
def test_resolve_package_path(self):
path = ConfigParser.resolve_package_path("pyhocon:config_parser.py")
assert os.path.exists(path)
def test_resolve_package_path_format(self):
with pytest.raises(ValueError):
ConfigParser.resolve_package_path("pyhocon/config_parser.py")
def test_resolve_package_path_missing(self):
with pytest.raises(ImportError):
ConfigParser.resolve_package_path("non_existent_module:foo.py")
def test_include_package_file(self, monkeypatch):
temp_dir = tempfile.mkdtemp()
try:
module_dir = os.path.join(temp_dir, 'my_module')
module_conf = os.path.join(module_dir, 'my.conf')
# create the module folder and necessary files (__init__ and config)
os.mkdir(module_dir)
open(os.path.join(module_dir, '__init__.py'), 'a').close()
with open(module_conf, 'w') as fdin:
fdin.write("{c: 3}")
# add the temp dir to sys.path so that 'my_module' can be discovered
monkeypatch.syspath_prepend(temp_dir)
# load the config and include the other config file from 'my_module'
config = ConfigFactory.parse_string(
"""
a: 1
b: 2
include package("my_module:my.conf")
"""
)
# check that the contents of both config files are available
assert dict(config.as_plain_ordered_dict()) == {'a': 1, 'b': 2, 'c': 3}
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def test_include_dict(self):
expected_res = {
'a': 1,
'b': 2,
'c': 3,
'd': 4
}
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{a: 1, b: 2}')
fdin.flush()
config1 = ConfigFactory.parse_string(
"""
a: {{
include "{tmp_file}"
c: 3
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config1['a'] == expected_res
config2 = ConfigFactory.parse_string(
"""
a: {{
c: 3
d: 4
include "{tmp_file}"
}}
""".format(tmp_file=fdin.name)
)
assert config2['a'] == expected_res
config3 = ConfigFactory.parse_string(
"""
a: {{
c: 3
include "{tmp_file}"
d: 4
}}
""".format(tmp_file=fdin.name)
)
assert config3['a'] == expected_res
def test_include_substitution(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('y = ${x}')
fdin.flush()
config = ConfigFactory.parse_string(
"""
include "{tmp_file}"
x = 42
""".format(tmp_file=fdin.name)
)
assert config['x'] == 42
assert config['y'] == 42
@pytest.mark.xfail
def test_include_substitution2(self):
with tempfile.NamedTemporaryFile('w') as fdin:
fdin.write('{ x : 10, y : ${x} }')
fdin.flush()
config = ConfigFactory.parse_string(
"""
{
a : { include """ + '"' + fdin.name + """" }
a : { x : 42 }
}
"""
)
assert config['a']['x'] == 42
assert config['a']['y'] == 42
def test_var_with_include_keyword(self):
config = ConfigFactory.parse_string(
"""
include-database=true
""")
assert config == {
'include-database': True
}
def test_substitution_override(self):
config = ConfigFactory.parse_string(
"""
database {
host = localhost
port = 5432
user = people
name = peopledb
pass = <PASSWORD>
}
user=test_user
pass=<PASSWORD>
database {
user = ${user}
pass = ${pass}
}
""")
assert config['database.user'] == 'test_user'
assert config['database.pass'] == '<PASSWORD>'
def test_substitution_flat_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = <PASSWORD>
name = ${?NOT_EXISTS}
pass = ${?NOT_EXISTS}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == '<PASSWORD>'
def test_substitution_multiple_override(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: foo
c: ${a} ${b}
c: ${b} ${a}
d: ${a} ${b}
d: ${a} bar
""")
assert config['c'] == 'foo 1'
assert config['d'] == '1 bar'
def test_substitution_nested_override(self):
config = ConfigFactory.parse_string(
"""
database {
name = peopledb
pass = <PASSWORD>
}
database {
name = ${?user}
pass = ${?pass}
}
""")
assert config['database.name'] == 'peopledb'
assert config['database.pass'] == '<PASSWORD>'
def test_optional_with_merge(self):
unresolved = ConfigFactory.parse_string(
"""
foo: 42
foo: ${?a}
""", resolve=False)
source = ConfigFactory.parse_string(
"""
b: 14
""")
config = unresolved.with_fallback(source)
assert config['foo'] == 42
config = source.with_fallback(unresolved)
assert config['foo'] == 42
def test_fallback_with_resolve(self):
config3 = ConfigFactory.parse_string("c=5")
config2 = ConfigFactory.parse_string("b=${c}", resolve=False)
config1 = ConfigFactory.parse_string("a=${b}", resolve=False) \
.with_fallback(config2, resolve=False) \
.with_fallback(config3)
assert {'a': 5, 'b': 5, 'c': 5} == config1
def test_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
a = 45
b = ${?c}
d = ${?c} 4
e = ${?a}
g = ${?c1} ${?c2}
h = ${?c1} ${?c2} 1
""")
assert 'b' not in config
assert config['d'] == 4
assert config['e'] == 45
assert 'g' not in config
assert config['h'] == 1
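# Added commentary (not in the original test): an optional substitution ${?c}
# simply disappears when c is undefined, so 'b' is never set, 'd' collapses to
# the remaining value 4, and 'e' picks up the defined key a = 45.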
def test_cascade_optional_substitution(self):
config = ConfigFactory.parse_string(
"""
num = 3
retries_msg = You have ${num} retries
retries_msg = ${?CUSTOM_MSG}
""")
assert config == {
'num': 3,
'retries_msg': 'You have 3 retries'
}
def test_substitution_cycle(self):
with pytest.raises(ConfigSubstitutionException):
ConfigFactory.parse_string(
"""
a = ${b}
b = ${c}
c = ${a}
""")
def test_assign_number_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
4
b = # test
# test2
5
c =
6
"""
)
assert config['a'] == 4
assert config['b'] == 5
assert config['c'] == 6
def test_assign_int(self):
config = ConfigFactory.parse_string(
"""
short = 12
long = 12321321837612378126213217321
negative = -15
"""
)
# on python 3 long will be an int but on python 2 long with be a long
assert config['short'] == 12
assert isinstance(config['short'], int)
assert config['long'] == 12321321837612378126213217321
assert isinstance(config['negative'], int)
assert config['negative'] == -15
def test_assign_float(self):
config = ConfigFactory.parse_string(
"""
a = 121.22
b = -121.22
c = .54
d = -.54
"""
)
# on python 3 long will be an int but on python 2 long with be a long
assert config['a'] == 121.22
assert config['b'] == -121.22
assert config['c'] == .54
assert config['d'] == -.54
def test_sci_real(self):
"""
Test scientific expression of number
"""
config = ConfigFactory.parse_string(
"""
short = 12.12321
long1 = 121.22E3423432
neg_long1 = 121.22E-1
long2 = 121.22e3423432
neg_long2 = 121.22e-3
"""
)
        # on python 3 long will be an int but on python 2 long will be a long
assert config['short'] == 12.12321
assert config['long1'] == 121.22E3423432
assert config['neg_long1'] == 121.22E-1
assert config['long2'] == 121.22E3423432
assert config['neg_long2'] == 121.22E-3
def test_assign_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
"a"
b = # test
# test2
"b"
c =
"c"
"""
)
assert config['a'] == 'a'
assert config['b'] == 'b'
assert config['c'] == 'c'
def test_assign_list_numbers_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
1,
2,
]
b = # test
# test2
[
3,
4,]
c =
[
5,
6
]
"""
)
assert config['a'] == [1, 2]
assert config['b'] == [3, 4]
assert config['c'] == [5, 6]
def test_assign_list_strings_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
[
"a",
"b",
]
b = # test
# test2
[
"c",
"d",]
c =
[
"e",
"f"
]
"""
)
assert config['a'] == ['a', 'b']
assert config['b'] == ['c', 'd']
assert config['c'] == ['e', 'f']
def test_assign_dict_strings_with_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a =
{
a: 1,
b: 2,
}
b = # test
# test2
{
c: 3,
d: 4,}
c =
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_assign_dict_strings_no_equal_sign_with_eol(self):
config = ConfigFactory.parse_string(
"""
a
{
a: 1,
b: 2,
}
b # test
# test2
{
c: 3,
d: 4,}
c
{
e: 5,
f: 6
}
"""
)
assert config['a'] == {'a': 1, 'b': 2}
assert config['b'] == {'c': 3, 'd': 4}
assert config['c'] == {'e': 5, 'f': 6}
def test_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = 123
a = ${?test}
a = 5
"""
)
assert config1['a'] == 5
config2 = ConfigFactory.parse_string(
"""
{
database {
host = "localhost"
port = 8000
url = ${database.host}":"${database.port}
}
database {
host = ${?DB_HOST}
}
database {
host = "other.host.net"
port = 433
}
}
"""
)
assert config2['database']['host'] == 'other.host.net'
assert config2['database']['port'] == 433
assert config2['database']['url'] == 'other.host.net:433'
def test_fallback_substitutions_overwrite(self):
config1 = ConfigFactory.parse_string(
"""
a = {
b: 1
c: 2
}
"""
)
config2 = ConfigFactory.parse_string(
"""
a.b = 4
a.d = 3
"""
)
config3 = config1.with_fallback(config2)
assert config3['a'] == {
'b': 1,
'c': 2,
'd': 3
}
config4 = ConfigFactory.parse_string(
"""
name: foo
"""
)
config5 = ConfigFactory.parse_string(
u"""
longName: "long "${?name}
""",
resolve=False
)
config6 = config4.with_fallback(config5)
assert config6 == {
'longName': 'long foo',
'name': 'foo'
}
def test_fallback_substitutions_overwrite_file(self):
config1 = ConfigFactory.parse_string(
"""
{
data-center-generic = { cluster-size: 8 }
misc = "mist"
}
"""
)
# use unicode path here for regression testing https://github.com/chimpler/pyhocon/issues/44
config2 = config1.with_fallback(u'samples/aws.conf')
assert config2 == {
'data-center-generic': {'cluster-size': 8},
'data-center-east': {'cluster-size': 8, 'name': 'east'},
'misc': 'mist',
'default-jvm-opts': ['-XX:+UseParNewGC'],
'large-jvm-opts': ['-XX:+UseParNewGC', '-Xm16g']
}
def test_fallback_self_ref_substitutions_append(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list = ${list} [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_fallback_self_ref_substitutions_append_plus_equals(self):
config1 = ConfigFactory.parse_string(
"""
list = [ 1, 2, 3 ]
"""
)
config2 = ConfigFactory.parse_string(
"""
list += [ 4, 5, 6 ]
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("list") == [1, 2, 3, 4, 5, 6]
def test_self_merge_ref_substitutions_object(self):
config1 = ConfigFactory.parse_string(
"""
a : { }
b : 1
c : ${a} { d : [ ${b} ] }
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("c.d") == [1]
def test_self_merge_ref_substitutions_object2(self):
config1 = ConfigFactory.parse_string(
"""
x : { v1: 1 }
b1 : {v2: 2 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b2 : ${x} {v2: 3}
b += [${b2}]
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
b = merged.get("b")
assert len(b) == 2
assert b[0] == {'v2': 2}
assert b[1] == {'v1': 1, 'v2': 3}
def test_self_merge_ref_substitutions_object3(self):
config1 = ConfigFactory.parse_string(
"""
b1 : { v1: 1 }
b = [${b1}]
""",
resolve=False
)
config2 = ConfigFactory.parse_string(
"""
b1 : { v1: 2, v2: 3 }
""",
resolve=False
)
merged = ConfigTree.merge_configs(config1, config2)
ConfigParser.resolve_substitutions(merged)
assert merged.get("b1") == {"v1": 2, "v2": 3}
b = merged.get("b")
assert len(b) == 1
assert b[0] == {"v1": 2, "v2": 3}
def test_fallback_self_ref_substitutions_merge(self):
config1 = ConfigFactory.parse_string(
"""
dict = { x: 1 }
"""
)
config2 = ConfigFactory.parse_string(
"""
dict = ${dict} { y: 2 }
""",
resolve=False
)
config2 = config2.with_fallback(config1)
assert config2.get("dict") == {'x': 1, 'y': 2}
def test_fallback_self_ref_substitutions_concat_string(self):
config1 = ConfigFactory.parse_string(
"""
string = abc
"""
)
config2 = ConfigFactory.parse_string(
"""
string = ${string}def
""",
resolve=False
)
result = config2.with_fallback(config1)
assert result.get("string") == 'abcdef'
# test no mutation on config1
assert result is not config1
# test no mutation on config2
assert "abc" not in str(config2)
def test_fallback_non_root(self):
root = ConfigFactory.parse_string(
"""
a = 1
mid.b = 1
"""
)
config = root.get_config("mid").with_fallback(root)
assert config['a'] == 1 and config['b'] == 1
def test_object_field_substitution(self):
config = ConfigFactory.parse_string(
"""
A = ${Test}
Test {
field1 = 1
field2 = ${Test.field1}"2"
field3 = ${Test.field2}"3"
}
"""
)
assert config.get_string("A.field1") == "1"
assert config.get_string("A.field2") == "12"
assert config.get_string("A.field3") == "123"
assert config.get_string("Test.field1") == "1"
assert config.get_string("Test.field2") == "12"
assert config.get_string("Test.field3") == "123"
def test_one_line_quote_escape(self):
config = ConfigFactory.parse_string(
"""
test_no_quotes: abc\\n\\n
test_quotes: "abc\\n\\n"
"""
)
assert config == {
'test_no_quotes': 'abc\n\n',
'test_quotes': 'abc\n\n'
}
def test_multi_line_escape(self):
config = ConfigFactory.parse_string(
"""
with-escaped-backslash: \"\"\"
\\\\
\"\"\"
with-newline-escape-sequence: \"\"\"
\\n
\"\"\"
with-escaped-newline-escape-sequence: \"\"\"
\\\\n
\"\"\"
"""
)
assert config['with-escaped-backslash'] == '\n\\\\\n'
assert config['with-newline-escape-sequence'] == '\n\\n\n'
assert config['with-escaped-newline-escape-sequence'] == '\n\\\\n\n'
def test_multiline_with_backslash(self):
config = ConfigFactory.parse_string(
"""
test = line1 \
line2
test2 = test
""")
assert config == {
'test': 'line1 line2',
'test2': 'test'
}
def test_from_dict_with_dict(self):
d = {
'banana': 3,
'apple': 4,
'pear': 1,
'orange': 2,
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_ordered_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['orange'] = 2
config = ConfigFactory.from_dict(d)
assert config == d
def test_from_dict_with_nested_dict(self):
d = OrderedDict()
d['banana'] = 3
d['apple'] = 4
d['pear'] = 1
d['tree'] = {
'a': 'abc\ntest\n',
'b': [1, 2, 3]
}
config = ConfigFactory.from_dict(d)
assert config == d
def test_object_concat(self):
config = ConfigFactory.parse_string(
"""o1 = {
foo : {
a : 1
b : 2
}
}
o2 = {
foo : {
b : 3
c : 4
}
}
o3 = ${o1} ${o2}
"""
)
assert config.get_int('o1.foo.b') == 2
assert config.get_int('o2.foo.b') == 3
assert config.get_int('o3.foo.b') == 3
assert config.get_int('o1.foo.c', default=42) == 42
assert config.get_int('o3.foo.a') == 1
assert config.get_int('o3.foo.c') == 4
def test_issue_75(self):
config = ConfigFactory.parse_string(
"""base : {
bar: ["a"]
}
sub : ${base} {
baz: ${base.bar} ["b"]
}
sub2: ${sub}
"""
)
assert config.get_list('base.bar') == ["a"]
assert config.get_list('sub.baz') == ["a", "b"]
assert config.get_list('sub2.baz') == ["a", "b"]
def test_plain_ordered_dict(self):
config = ConfigFactory.parse_string(
"""
e : ${a} {
}
""",
resolve=False
)
with pytest.raises(ConfigException):
config.as_plain_ordered_dict()
def test_quoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
no_trailing_ws = "foo" "bar "
trailing_ws = "foo" "bar "{ws}
trailing_ws_with_comment = "foo" "bar "{ws}// comment
""".format(ws=' '))
assert config == {
'no_trailing_ws': "foo bar ",
'trailing_ws': "foo bar ",
'trailing_ws_with_comment': "foo bar "
}
def test_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo bar
""")
assert config == {
'a': 'foo bar'
}
def test_quoted_unquoted_strings_with_ws(self):
config = ConfigFactory.parse_string(
"""
a = foo "bar" dummy
""")
assert config == {
'a': 'foo bar dummy'
}
def test_quoted_unquoted_strings_with_ws_substitutions(self):
config = ConfigFactory.parse_string(
"""
x = 5
b = test
a = foo "bar" ${b} dummy
c = foo ${x} bv
d = foo ${x} 43
""")
assert config == {
'x': 5,
'b': 'test',
'a': 'foo bar test dummy',
'c': 'foo 5 bv',
'd': 'foo 5 43'
}
def test_complex_substitutions(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: ${c} {
pa: [${a}]
pb: ${b.pa}
}
c: { }
d: { pc: ${b.pa} }
e: ${b}
""", resolve=True)
assert config == {
'a': 1,
'b': {'pa': [1], 'pb': [1]},
'c': {},
'd': {'pc': [1]},
'e': {'pa': [1], 'pb': [1]}
}
def test_assign_next_line(self):
config = ConfigFactory.parse_string(
"""
a = // abc
abc
c =
5
""")
assert config == {
'a': 'abc',
'c': 5
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment(self):
config = ConfigFactory.parse_string(
"""
string_from_env = ${STRING_VAR}
""")
assert config == {
'string_from_env': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, STRING_VAR='value_from_environment')
def test_string_from_environment_self_ref_optional(self):
config = ConfigFactory.parse_string(
"""
STRING_VAR = ${?STRING_VAR}
""")
assert config == {
'STRING_VAR': 'value_from_environment'
}
@mock.patch.dict(os.environ, TRUE_OR_FALSE='false')
def test_bool_from_environment(self):
config = ConfigFactory.parse_string(
"""
bool_from_env = ${TRUE_OR_FALSE}
""")
assert config == {
'bool_from_env': 'false'
}
assert config.get_bool('bool_from_env') is False
@mock.patch.dict(os.environ, INT_VAR='5')
def test_int_from_environment(self):
config = ConfigFactory.parse_string(
"""
int_from_env = ${INT_VAR}
""")
assert config == {
'int_from_env': '5'
}
assert config.get_int('int_from_env') == 5
def test_unicode_dict_key(self):
input_string = u"""
www.sample.com {
us {
name = "first domain"
}
}
www.example-ö.com {
us {
name = "second domain"
}
}
"""
config = ConfigFactory.parse_string(input_string)
assert config.get_string(u'www.sample.com.us.name') == 'first domain'
assert config.get_string(u'www.example-ö.com.us.name') == 'second domain'
with pytest.raises(ConfigWrongTypeException):
config.put(u'www.example-ö', 'append_failure', append=True)
with pytest.raises(ConfigMissingException):
config.get_string(u'missing_unicode_key_ö')
with pytest.raises(ConfigException):
config.get_bool(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_list(u'www.example-ö.com.us.name')
with pytest.raises(ConfigException):
config.get_config(u'www.example-ö.com.us.name')
with pytest.raises(ConfigWrongTypeException):
config.get_string(u'www.example-ö.com.us.name.missing')
def test_with_comment_on_last_line(self):
        # Address issue #102
config_tree = ConfigFactory.parse_string("""
foo: "1"
bar: "2"
# DO NOT CHANGE ANY OF THE ABOVE SETTINGS!""")
assert config_tree == {
'foo': '1',
'bar': '2'
}
def test_triple_quotes_same_line(self):
config_tree = ConfigFactory.parse_string('a:["""foo"""", "bar"]')
assert config_tree == {
'a': ['foo"', "bar"]
}
def test_pop(self):
config_tree = ConfigFactory.parse_string('a:{b: 3, d: 6}')
assert 3 == config_tree.pop('a.b', 5)
assert 5 == config_tree.pop('a.c', 5)
expected = {
'a': {'d': 6}
}
assert expected == config_tree
def test_merge_overriden(self):
        # Address issue #110
# ConfigValues must merge with its .overriden_value
# if both are ConfigTree
config_tree = ConfigFactory.parse_string("""
foo: ${bar}
foo: ${baz}
bar: {r: 1, s: 2}
baz: {s: 3, t: 4}
""")
assert 'r' in config_tree['foo'] and 't' in config_tree['foo'] and config_tree['foo']['s'] == 3
def test_attr_syntax(self):
config = ConfigFactory.parse_string(
"""
a: 1
b: {
pb: 5
}
""")
assert 5 == config.b.pb
def test_escape_quote(self):
config = ConfigFactory.parse_string(
"""
quoted: "abc\\"test"
unquoted: abc\\"test
""")
assert 'abc"test' == config['quoted']
assert 'abc"test' == config['unquoted']
def test_escape_quote_complex(self):
config = ConfigFactory.parse_string(
"""
value: "{\\"critical\\":\\"0.00\\",\\"warning\\":\\"99.99\\"}"
"""
)
assert '{"critical":"0.00","warning":"99.99"}' == config['value']
def test_keys_with_slash(self):
config = ConfigFactory.parse_string(
"""
/abc/cde1: abc
"/abc/cde2": "cde"
/abc/cde3: "fgh"
""")
assert 'abc' == config['/abc/cde1']
assert 'cde' == config['/abc/cde2']
assert 'fgh' == config['/abc/cde3']
def test_mutation_values(self):
config = ConfigFactory.parse_string(
"""
common : {
}
b1 = []
var = "wrong"
compilerCommon : ${common} {
VAR : ${var}
}
substrate-suite: {
VAR : "right"
}
b1 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
b2 = [
${compilerCommon} ${substrate-suite}
${compilerCommon} ${substrate-suite}
]
""")
assert config.get("b1")[1]['VAR'] == 'right'
assert config.get("b2")[1]['VAR'] == 'right'
def test_escape_sequences_json_equivalence(self):
"""
Quoted strings are in the same format as JSON strings,
See: https://github.com/lightbend/config/blob/master/HOCON.md#unchanged-from-json
"""
source = r"""
{
"plain-backslash": "\\",
"tab": "\t",
"no-tab": "\\t",
"newline": "\n",
"no-newline": "\\n",
"cr": "\r",
"no-cr": "\\r",
"windows": "c:\\temp"
}
"""
expected = {
'plain-backslash': '\\',
'tab': '\t',
'no-tab': '\\t',
'newline': '\n',
'no-newline': '\\n',
'cr': '\r',
'no-cr': '\\r',
'windows': 'c:\\temp',
}
config = ConfigFactory.parse_string(source)
assert config == expected
assert config == json.loads(source)
try:
from dateutil.relativedelta import relativedelta
@pytest.mark.parametrize('data_set', [
('a: 1 months', relativedelta(months=1)),
('a: 1months', relativedelta(months=1)),
('a: 2 month', relativedelta(months=2)),
('a: 3 mo', relativedelta(months=3)),
('a: 3mo', relativedelta(months=3)),
('a: 3 mon', '3 mon'),
('a: 1 years', relativedelta(years=1)),
('a: 1years', relativedelta(years=1)),
('a: 2 year', relativedelta(years=2)),
('a: 3 y', relativedelta(years=3)),
('a: 3y', relativedelta(years=3)),
])
def test_parse_string_with_duration_optional_units(data_set):
config = ConfigFactory.parse_string(data_set[0])
assert config['a'] == data_set[1]
except Exception:
pass
|
scenario_runner/srunner/scenariomanager/scenario_manager.py | cgeller/WorldOnRails | 447 | 3458 | <reponame>cgeller/WorldOnRails<gh_stars>100-1000
#!/usr/bin/env python
# Copyright (c) 2018-2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""
This module provides the ScenarioManager implementation.
It must not be modified and is for reference only!
"""
from __future__ import print_function
import sys
import time
import py_trees
from srunner.autoagents.agent_wrapper import AgentWrapper
from srunner.scenariomanager.carla_data_provider import CarlaDataProvider
from srunner.scenariomanager.result_writer import ResultOutputProvider
from srunner.scenariomanager.timer import GameTime
from srunner.scenariomanager.watchdog import Watchdog
class ScenarioManager(object):
"""
Basic scenario manager class. This class holds all functionality
    required to start and analyze a scenario.
The user must not modify this class.
To use the ScenarioManager:
1. Create an object via manager = ScenarioManager()
2. Load a scenario via manager.load_scenario()
    3. Trigger the execution of the scenario via manager.run_scenario()
       This function is designed to explicitly control the start and end of
       the scenario execution
4. Trigger a result evaluation with manager.analyze_scenario()
5. If needed, cleanup with manager.stop_scenario()
"""
def __init__(self, debug_mode=False, sync_mode=False, timeout=2.0):
"""
        Sets up the parameters, which will be filled at load_scenario()
"""
self.scenario = None
self.scenario_tree = None
self.scenario_class = None
self.ego_vehicles = None
self.other_actors = None
self._debug_mode = debug_mode
self._agent = None
self._sync_mode = sync_mode
self._running = False
self._timestamp_last_run = 0.0
self._timeout = timeout
self._watchdog = Watchdog(float(self._timeout))
self.scenario_duration_system = 0.0
self.scenario_duration_game = 0.0
self.start_system_time = None
self.end_system_time = None
def _reset(self):
"""
Reset all parameters
"""
self._running = False
self._timestamp_last_run = 0.0
self.scenario_duration_system = 0.0
self.scenario_duration_game = 0.0
self.start_system_time = None
self.end_system_time = None
GameTime.restart()
def cleanup(self):
"""
This function triggers a proper termination of a scenario
"""
if self.scenario is not None:
self.scenario.terminate()
if self._agent is not None:
self._agent.cleanup()
self._agent = None
CarlaDataProvider.cleanup()
def load_scenario(self, scenario, agent=None):
"""
Load a new scenario
"""
self._reset()
self._agent = AgentWrapper(agent) if agent else None
if self._agent is not None:
self._sync_mode = True
self.scenario_class = scenario
self.scenario = scenario.scenario
self.scenario_tree = self.scenario.scenario_tree
self.ego_vehicles = scenario.ego_vehicles
self.other_actors = scenario.other_actors
# To print the scenario tree uncomment the next line
# py_trees.display.render_dot_tree(self.scenario_tree)
if self._agent is not None:
self._agent.setup_sensors(self.ego_vehicles[0], self._debug_mode)
def run_scenario(self):
"""
Trigger the start of the scenario and wait for it to finish/fail
"""
print("ScenarioManager: Running scenario {}".format(self.scenario_tree.name))
self.start_system_time = time.time()
start_game_time = GameTime.get_time()
self._watchdog.start()
self._running = True
while self._running:
timestamp = None
world = CarlaDataProvider.get_world()
if world:
snapshot = world.get_snapshot()
if snapshot:
timestamp = snapshot.timestamp
if timestamp:
self._tick_scenario(timestamp)
self._watchdog.stop()
self.cleanup()
self.end_system_time = time.time()
end_game_time = GameTime.get_time()
self.scenario_duration_system = self.end_system_time - \
self.start_system_time
self.scenario_duration_game = end_game_time - start_game_time
if self.scenario_tree.status == py_trees.common.Status.FAILURE:
print("ScenarioManager: Terminated due to failure")
def _tick_scenario(self, timestamp):
"""
Run next tick of scenario and the agent.
        If running synchronously, it also handles the ticking of the world.
"""
if self._timestamp_last_run < timestamp.elapsed_seconds and self._running:
self._timestamp_last_run = timestamp.elapsed_seconds
self._watchdog.update()
if self._debug_mode:
print("\n--------- Tick ---------\n")
# Update game time and actor information
GameTime.on_carla_tick(timestamp)
CarlaDataProvider.on_carla_tick()
if self._agent is not None:
ego_action = self._agent()
# Tick scenario
self.scenario_tree.tick_once()
if self._debug_mode:
print("\n")
py_trees.display.print_ascii_tree(self.scenario_tree, show_status=True)
sys.stdout.flush()
if self.scenario_tree.status != py_trees.common.Status.RUNNING:
self._running = False
if self._agent is not None:
self.ego_vehicles[0].apply_control(ego_action)
if self._sync_mode and self._running and self._watchdog.get_status():
CarlaDataProvider.get_world().tick()
def get_running_status(self):
"""
returns:
            bool: False if watchdog exception occurred, True otherwise
"""
return self._watchdog.get_status()
def stop_scenario(self):
"""
This function is used by the overall signal handler to terminate the scenario execution
"""
self._running = False
def analyze_scenario(self, stdout, filename, junit):
"""
This function is intended to be called from outside and provide
        the final statistics about the scenario (human-readable, in the form of a junit
report, etc.)
"""
failure = False
timeout = False
result = "SUCCESS"
if self.scenario.test_criteria is None:
print("Nothing to analyze, this scenario has no criteria")
return True
for criterion in self.scenario.get_criteria():
if (not criterion.optional and
criterion.test_status != "SUCCESS" and
criterion.test_status != "ACCEPTABLE"):
failure = True
result = "FAILURE"
elif criterion.test_status == "ACCEPTABLE":
result = "ACCEPTABLE"
if self.scenario.timeout_node.timeout and not failure:
timeout = True
result = "TIMEOUT"
output = ResultOutputProvider(self, result, stdout, filename, junit)
output.write()
return failure or timeout
|
ikalog/ui/options.py | fetus-hina/IkaLog | 285 | 3485 | <reponame>fetus-hina/IkaLog
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gettext
import wx
import wx.lib.scrolledpanel
import ikalog.outputs
from ikalog.ui.events import *
from ikalog.ui.panel import *
from ikalog.ui import VideoCapture
from ikalog.utils import *
_ = Localization.gettext_translation('IkaUI', fallback=True).gettext
class OptionsGUI(object):
def __init__(self, ikalog_gui):
self.ikalog_gui = ikalog_gui
self.frame = None
self._init_frame()
def _init_frame(self):
if self.frame:
return
self.frame = wx.Frame(
self.ikalog_gui.frame, wx.ID_ANY, _("Options"), size=(640, 500))
self.notebook = wx.Notebook(self.frame, wx.ID_ANY)
# Apply button
button_apply = wx.Button(self.frame, wx.ID_ANY, _(u'Apply'))
# Use a bold font.
apply_font = button_apply.GetFont()
apply_font.SetWeight(wx.FONTWEIGHT_BOLD)
button_apply.SetFont(apply_font)
button_cancel = wx.Button(self.frame, wx.ID_ANY, _(u'Cancel'))
button_load_default = wx.Button(
self.frame, wx.ID_ANY, _(u'Load default'))
buttons_sizer = wx.BoxSizer(wx.HORIZONTAL)
buttons_sizer.Add(button_apply)
buttons_sizer.Add(button_cancel)
buttons_sizer.Add(button_load_default)
top_sizer = wx.BoxSizer(wx.VERTICAL)
top_sizer.Add(self.notebook)
top_sizer.Add(buttons_sizer)
self.frame.SetSizer(top_sizer)
# Set event handlers for buttons.
button_apply.Bind(wx.EVT_BUTTON, self.on_button_apply)
button_cancel.Bind(wx.EVT_BUTTON, self.on_button_cancel)
button_load_default.Bind(wx.EVT_BUTTON, self.on_button_load_default)
outputs = [self.ikalog_gui.capture] + self.ikalog_gui.outputs
self._init_outputs(outputs)
# self.capture.panel is a part of self.frame. This Bind propagates
# capture's source change to the preview.
self.ikalog_gui.capture.panel.Bind(
EVT_INPUT_INITIALIZED, self.ikalog_gui.on_input_initialized)
# Refresh UI of each plugin.
self.ikalog_gui.engine.call_plugins(
'on_config_load_from_context', debug=True)
def show(self):
if not self.frame:
self._init_frame()
self.frame.Show()
self.frame.Raise()
def on_button_apply(self, event):
self.ikalog_gui.on_options_apply(event)
def on_button_cancel(self, event):
self.ikalog_gui.on_options_cancel(event)
def on_button_load_default(self, event):
self.ikalog_gui.on_options_load_default(event)
def _init_outputs(self, outputs):
output_dict = {}
for output in outputs:
output_dict[output.__class__] = output
# Keys for outputs in the main page.
keys = [
ikalog.ui.VideoCapture,
ikalog.outputs.OBS,
ikalog.outputs.StatInk,
ikalog.outputs.Twitter
]
# Keys for outputs combined into the misc tab.
misc_keys = [
ikalog.outputs.CSV,
ikalog.outputs.JSON,
ikalog.outputs.Screenshot,
ikalog.outputs.Boyomi,
ikalog.outputs.Slack,
ikalog.outputs.WebSocketServer,
]
for key in output_dict.keys():
if key in misc_keys:
continue
if key not in keys:
keys.append(key)
# Main tabs
index = 0
for key in keys:
output = output_dict.get(key)
if not output:
continue
output.on_option_tab_create(self.notebook)
self.notebook.InsertPage(index, output.panel, output.panel_name)
index += 1
# Misc tab
self.misc_panel = wx.lib.scrolledpanel.ScrolledPanel(
self.notebook, wx.ID_ANY, size=(640, 360))
self.misc_panel_sizer = wx.BoxSizer(wx.VERTICAL)
default_font = self.misc_panel.GetFont()
title_font = wx.Font(default_font.GetPointSize(),
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD)
for key in misc_keys:
output = output_dict.get(key)
if not output:
continue
output.on_option_tab_create(self.misc_panel)
title = wx.StaticText(self.misc_panel, wx.ID_ANY, output.panel_name)
title.SetFont(title_font)
self.misc_panel_sizer.Add(title)
self.misc_panel_sizer.Add(
output.panel, flag=wx.EXPAND | wx.ALL, border=10)
self.misc_panel_sizer.Add((-1, 25))
self.misc_panel.SetSizer(self.misc_panel_sizer)
self.misc_panel.SetupScrolling()
self.notebook.InsertPage(index, self.misc_panel, _('Misc.'))
|
tests/stack_test.py | arthurlogilab/py_zipkin | 225 | 3494 | import mock
import pytest
import py_zipkin.storage
@pytest.fixture(autouse=True, scope="module")
def create_zipkin_attrs():
# The following tests all expect _thread_local.zipkin_attrs to exist: if it
# doesn't, mock.patch will fail.
py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_returns_none_if_no_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().get()
assert not py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_none_if_no_zipkin_attrs():
with mock.patch.object(py_zipkin.storage.log, "warning", autospec=True) as log:
assert not py_zipkin.storage.Stack([]).get()
assert log.call_count == 1
def test_storage_stack_still_works_if_you_dont_pass_in_storage():
# Let's make sure this still works if we don't pass in a custom storage.
assert not py_zipkin.storage.Stack().get()
def test_get_zipkin_attrs_returns_the_last_of_the_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_get_zipkin_attrs_with_context_returns_the_last_of_the_list():
assert "foo" == py_zipkin.storage.Stack(["bar", "foo"]).get()
def test_pop_zipkin_attrs_does_nothing_if_no_requests():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", []):
assert not py_zipkin.storage.ThreadLocalStack().pop()
def test_pop_zipkin_attrs_with_context_does_nothing_if_no_requests():
assert not py_zipkin.storage.Stack([]).pop()
def test_pop_zipkin_attrs_removes_the_last_zipkin_attrs():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo", "bar"]):
assert "bar" == py_zipkin.storage.ThreadLocalStack().pop()
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
def test_pop_zipkin_attrs_with_context_removes_the_last_zipkin_attrs():
context_stack = py_zipkin.storage.Stack(["foo", "bar"])
assert "bar" == context_stack.pop()
assert "foo" == context_stack.get()
def test_push_zipkin_attrs_adds_new_zipkin_attrs_to_list():
tracer = py_zipkin.storage.get_default_tracer()
with mock.patch.object(tracer._context_stack, "_storage", ["foo"]):
assert "foo" == py_zipkin.storage.ThreadLocalStack().get()
py_zipkin.storage.ThreadLocalStack().push("bar")
assert "bar" == py_zipkin.storage.ThreadLocalStack().get()
def test_push_zipkin_attrs_with_context_adds_new_zipkin_attrs_to_list():
stack = py_zipkin.storage.Stack(["foo"])
assert "foo" == stack.get()
stack.push("bar")
assert "bar" == stack.get()
def test_stack_copy():
stack = py_zipkin.storage.Stack()
stack.push("a")
stack.push("b")
the_copy = stack.copy()
the_copy.push("c")
stack.push("d")
assert ["a", "b", "c"] == the_copy._storage
assert ["a", "b", "d"] == stack._storage
|
desktop_local_tests/windows/test_windows_packet_capture_disrupt_force_public_dns_servers.py | UAEKondaya1/expressvpn_leak_testing | 219 | 3510 | <reponame>UAEKondaya1/expressvpn_leak_testing
from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.windows.windows_dns_force_public_dns_servers_disrupter import WindowsDNSForcePublicDNSServersDisrupter
class TestWindowsPacketCaptureDisruptForcePublicDNSServers(LocalPacketCaptureTestCaseWithDisrupter):
# TODO: Make the packet capture here DNS specific?
def __init__(self, devices, parameters):
super().__init__(WindowsDNSForcePublicDNSServersDisrupter, devices, parameters)
|
homeassistant/components/zha/core/channels/lighting.py | liangleslie/core | 30,023 | 3530 | """Lighting channels module for Zigbee Home Automation."""
from __future__ import annotations
from contextlib import suppress
from zigpy.zcl.clusters import lighting
from .. import registries
from ..const import REPORT_CONFIG_DEFAULT
from .base import ClientChannel, ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Ballast.cluster_id)
class Ballast(ZigbeeChannel):
"""Ballast channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(lighting.Color.cluster_id)
class ColorClientChannel(ClientChannel):
"""Color client channel."""
@registries.BINDABLE_CLUSTERS.register(lighting.Color.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Color.cluster_id)
class ColorChannel(ZigbeeChannel):
"""Color channel."""
CAPABILITIES_COLOR_XY = 0x08
CAPABILITIES_COLOR_TEMP = 0x10
UNSUPPORTED_ATTRIBUTE = 0x86
REPORT_CONFIG = (
{"attr": "current_x", "config": REPORT_CONFIG_DEFAULT},
{"attr": "current_y", "config": REPORT_CONFIG_DEFAULT},
{"attr": "color_temperature", "config": REPORT_CONFIG_DEFAULT},
)
MAX_MIREDS: int = 500
MIN_MIREDS: int = 153
ZCL_INIT_ATTRS = {
"color_mode": False,
"color_temp_physical_min": True,
"color_temp_physical_max": True,
"color_capabilities": True,
"color_loop_active": False,
}
@property
def color_capabilities(self) -> int:
"""Return color capabilities of the light."""
with suppress(KeyError):
return self.cluster["color_capabilities"]
if self.cluster.get("color_temperature") is not None:
return self.CAPABILITIES_COLOR_XY | self.CAPABILITIES_COLOR_TEMP
return self.CAPABILITIES_COLOR_XY
@property
def color_mode(self) -> int | None:
"""Return cached value of the color_mode attribute."""
return self.cluster.get("color_mode")
@property
def color_loop_active(self) -> int | None:
"""Return cached value of the color_loop_active attribute."""
return self.cluster.get("color_loop_active")
@property
def color_temperature(self) -> int | None:
"""Return cached value of color temperature."""
return self.cluster.get("color_temperature")
@property
def current_x(self) -> int | None:
"""Return cached value of the current_x attribute."""
return self.cluster.get("current_x")
@property
def current_y(self) -> int | None:
"""Return cached value of the current_y attribute."""
return self.cluster.get("current_y")
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_min", self.MIN_MIREDS)
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_max", self.MAX_MIREDS)
|
querybuilder/tests/window_tests.py | wesokes/django-query-builder | 110 | 3535 | <reponame>wesokes/django-query-builder<filename>querybuilder/tests/window_tests.py<gh_stars>100-1000
from querybuilder.fields import (
RankField, RowNumberField, DenseRankField, PercentRankField, CumeDistField, NTileField, LagField,
LeadField, FirstValueField, LastValueField, NthValueField, NumStdDevField
)
from querybuilder.query import QueryWindow, Query
from querybuilder.tests.models import Order
from querybuilder.tests.query_tests import QueryTestCase, get_comparison_str
class QueryWindowTest(QueryTestCase):
def test_query_window(self):
query_window = QueryWindow()
query_str = query_window.get_sql()
expected_query = 'OVER ()'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition(self):
query_window = QueryWindow().partition_by('field_one')
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_order(self):
query_window = QueryWindow().order_by('field_one')
query_str = query_window.get_sql()
expected_query = 'OVER (ORDER BY field_one ASC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition_order(self):
query_window = QueryWindow().partition_by(
'field_one'
).order_by(
'field_one'
)
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one ORDER BY field_one ASC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition_order_many(self):
query_window = QueryWindow().partition_by(
'field_one'
).partition_by(
'field_two'
).order_by(
'field_one'
).order_by(
'-field_two'
)
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one, field_two ORDER BY field_one ASC, field_two DESC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
class WindowFunctionTest(QueryTestCase):
def test_rank_no_over(self):
query = Query().from_table(
table=Order,
fields=[
RankField()
]
)
query_str = query.get_sql()
expected_query = 'SELECT RANK() AS "rank" FROM querybuilder_tests_order'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over(self):
query = Query().from_table(
table=Order,
fields=[
RankField(
over=QueryWindow()
)
]
)
query_str = query.get_sql()
expected_query = 'SELECT RANK() OVER () AS "rank" FROM querybuilder_tests_order'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over_order(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().order_by(
'id'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, RANK() OVER (ORDER BY id ASC) AS "rank" FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over_partition(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().partition_by(
'account_id'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, RANK() OVER (PARTITION BY account_id) AS "rank" FROM '
'querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_row_number(self):
query = Query().from_table(
table=Order,
fields=[
'*',
RowNumberField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'row_number'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'ROW_NUMBER() OVER (ORDER BY margin DESC) AS "row_number" '
'FROM querybuilder_tests_order '
'ORDER BY row_number '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().partition_by(
'account_id'
).order_by(
'id'
)
)
]
).order_by(
'-rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, '
'RANK() OVER (PARTITION BY account_id ORDER BY id ASC) AS "rank" '
'FROM querybuilder_tests_order '
'ORDER BY rank '
'DESC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_dense_rank(self):
query = Query().from_table(
table=Order,
fields=[
'*',
DenseRankField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'dense_rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'DENSE_RANK() OVER (ORDER BY margin DESC) AS "dense_rank" '
'FROM querybuilder_tests_order '
'ORDER BY dense_rank '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_percent(self):
query = Query().from_table(
table=Order,
fields=[
'*',
PercentRankField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'percent_rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'PERCENT_RANK() OVER (ORDER BY margin DESC) AS "percent_rank" '
'FROM querybuilder_tests_order '
'ORDER BY percent_rank '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_cume_dist(self):
query = Query().from_table(
table=Order,
fields=[
'*',
CumeDistField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'cume_dist'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'CUME_DIST() OVER (ORDER BY margin DESC) AS "cume_dist" '
'FROM querybuilder_tests_order '
'ORDER BY cume_dist '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_ntile(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NTileField(
num_buckets=2,
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'ntile'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'NTILE(2) OVER (ORDER BY margin DESC) AS "ntile" '
'FROM querybuilder_tests_order '
'ORDER BY ntile '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lag(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LagField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAG(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lag" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lag_default(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LagField(
'margin',
default=0,
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAG(querybuilder_tests_order.margin, 1, \'0\') OVER (ORDER BY margin DESC) AS "margin_lag" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lead(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LeadField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LEAD(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lead" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_first_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
FirstValueField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'FIRST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin DESC) AS "margin_first_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_last_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LastValueField(
'margin',
over=QueryWindow().order_by(
'margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin ASC) AS "margin_last_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_nth_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NthValueField(
'margin',
n=2,
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'NTH_VALUE(querybuilder_tests_order.margin, 2) OVER (ORDER BY margin DESC) AS "margin_nth_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_num_stddev(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NumStdDevField(
'margin',
over=QueryWindow()
)
]
).order_by(
'-margin_num_stddev'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'(CASE WHEN (STDDEV(querybuilder_tests_order.margin) OVER ()) <> 0 '
'THEN ((querybuilder_tests_order.margin - ('
'AVG(querybuilder_tests_order.margin) OVER ())) / (STDDEV(querybuilder_tests_order.margin) OVER ())) '
'ELSE 0 '
'END) '
'AS "margin_num_stddev" '
'FROM querybuilder_tests_order '
'ORDER BY margin_num_stddev '
'DESC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
|
request/management/commands/purgerequests.py | hramezani/django-request | 373 | 3564 | from datetime import timedelta
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from ...models import Request
DURATION_OPTIONS = {
'hours': lambda amount: timezone.now() - timedelta(hours=amount),
'days': lambda amount: timezone.now() - timedelta(days=amount),
'weeks': lambda amount: timezone.now() - timedelta(weeks=amount),
'months': lambda amount: timezone.now() + relativedelta(months=-amount),
'years': lambda amount: timezone.now() + relativedelta(years=-amount),
}
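# Illustrative sketch (not part of the original module): DURATION_OPTIONS['days'](30)
# evaluates to a datetime 30 days in the past, which handle() below uses as the
# cutoff for Request.objects.filter(time__lte=...).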
try:
    # to keep backward compatibility with Python 2
input = raw_input
except NameError:
pass
class Command(BaseCommand):
help = 'Purge old requests.'
def add_arguments(self, parser):
parser.add_argument(
'amount',
type=int,
)
parser.add_argument('duration')
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Tells Django to NOT prompt the user for input of any kind.'
)
def handle(self, *args, **options):
amount = options['amount']
duration = options['duration']
# Check we have the correct values
        if duration[-1] != 's':  # If it's not plural, make it plural
duration_plural = '{0}s'.format(duration)
else:
duration_plural = duration
if duration_plural not in DURATION_OPTIONS:
raise CommandError('Amount must be {0}'.format(', '.join(DURATION_OPTIONS)))
qs = Request.objects.filter(time__lte=DURATION_OPTIONS[duration_plural](amount))
count = qs.count()
if count == 0:
print('There are no requests to delete.')
return
if options.get('interactive'):
confirm = input('''
You have requested a database reset.
This will IRREVERSIBLY DESTROY any
requests created before {0} {1} ago.
That is a total of {2} requests.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel:'''.format(amount, duration, count))
else:
confirm = 'yes'
if confirm == 'yes':
qs.delete()
else:
print('Purge cancelled')
|
compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py | medismailben/llvm-project | 2,338 | 3569 | def getRoot(config):
if not config.parent:
return config
return getRoot(config.parent)
root = getRoot(config)
# We only run a small set of tests on Windows for now.
# Override the parent directory's "unsupported" decision until we can handle
# all of its tests.
if root.host_os in ['Windows']:
config.unsupported = False
else:
config.unsupported = True
|
lib/python/test/__init__.py | woozhijun/cat | 17,318 | 3587 | #!/usr/bin/env python
# encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding("utf-8") |
platformio/commands/home/run.py | Granjow/platformio-core | 4,744 | 3601 | # Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from urllib.parse import urlparse
import click
import uvicorn
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import PlainTextResponse
from starlette.routing import Mount, Route, WebSocketRoute
from starlette.staticfiles import StaticFiles
from starlette.status import HTTP_403_FORBIDDEN
from platformio.commands.home.rpc.handlers.account import AccountRPC
from platformio.commands.home.rpc.handlers.app import AppRPC
from platformio.commands.home.rpc.handlers.ide import IDERPC
from platformio.commands.home.rpc.handlers.misc import MiscRPC
from platformio.commands.home.rpc.handlers.os import OSRPC
from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC
from platformio.commands.home.rpc.handlers.project import ProjectRPC
from platformio.commands.home.rpc.server import WebSocketJSONRPCServerFactory
from platformio.compat import aio_get_running_loop
from platformio.exception import PlatformioException
from platformio.package.manager.core import get_core_package_dir
from platformio.proc import force_exit
class ShutdownMiddleware:
def __init__(self, app):
self.app = app
async def __call__(self, scope, receive, send):
if scope["type"] == "http" and b"__shutdown__" in scope.get("query_string", {}):
await shutdown_server()
await self.app(scope, receive, send)
async def shutdown_server(_=None):
aio_get_running_loop().call_later(0.5, force_exit)
return PlainTextResponse("Server has been shutdown!")
async def protected_page(_):
return PlainTextResponse(
"Protected PlatformIO Home session", status_code=HTTP_403_FORBIDDEN
)
def run_server(host, port, no_open, shutdown_timeout, home_url):
contrib_dir = get_core_package_dir("contrib-piohome")
if not os.path.isdir(contrib_dir):
raise PlatformioException("Invalid path to PIO Home Contrib")
ws_rpc_factory = WebSocketJSONRPCServerFactory(shutdown_timeout)
ws_rpc_factory.addObjectHandler(AccountRPC(), namespace="account")
ws_rpc_factory.addObjectHandler(AppRPC(), namespace="app")
ws_rpc_factory.addObjectHandler(IDERPC(), namespace="ide")
ws_rpc_factory.addObjectHandler(MiscRPC(), namespace="misc")
ws_rpc_factory.addObjectHandler(OSRPC(), namespace="os")
ws_rpc_factory.addObjectHandler(PIOCoreRPC(), namespace="core")
ws_rpc_factory.addObjectHandler(ProjectRPC(), namespace="project")
path = urlparse(home_url).path
routes = [
WebSocketRoute(path + "wsrpc", ws_rpc_factory, name="wsrpc"),
Route(path + "__shutdown__", shutdown_server, methods=["POST"]),
Mount(path, StaticFiles(directory=contrib_dir, html=True), name="static"),
]
if path != "/":
routes.append(Route("/", protected_page))
uvicorn.run(
Starlette(
middleware=[Middleware(ShutdownMiddleware)],
routes=routes,
on_startup=[
lambda: click.echo(
"PIO Home has been started. Press Ctrl+C to shutdown."
),
lambda: None if no_open else click.launch(home_url),
],
),
host=host,
port=port,
log_level="warning",
)
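# Illustrative invocation sketch (values are placeholders; in practice this function
# is driven by PlatformIO's home CLI command rather than called directly):
#
#   run_server("127.0.0.1", 8008, no_open=False, shutdown_timeout=10,
#              home_url="http://127.0.0.1:8008/")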
|
tests/test_serialize.py | aferrall/redner | 1,146 | 3607 | import pyredner
import numpy as np
import torch
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to > 0
resolution = (256, 256),
fisheye = False)
mat_grey = pyredner.Material(\
diffuse_reflectance = \
torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]
shape_triangle = pyredner.Shape(\
vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shape_light = pyredner.Shape(\
vertices = torch.tensor([[-1.0, -1.0, -7.0],
[ 1.0, -1.0, -7.0],
[-1.0, 1.0, -7.0],
[ 1.0, 1.0, -7.0]], device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
dtype = torch.int32, device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1,
intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
|
test/IECoreMaya/ImageConverterTest.py | bradleyhenke/cortex | 386 | 3623 | <reponame>bradleyhenke/cortex
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import IECore
import IECoreImage
import IECoreMaya
class ImageConverterTest( IECoreMaya.TestCase ) :
def test( self ) :
imageA = IECore.Reader.create( "test/IECoreImage/data/exr/colorBarsWithAlpha.exr" ).read()
toMaya = IECoreMaya.ToMayaImageConverter( imageA )
mImage = maya.OpenMaya.MImage()
toMaya.convert( mImage )
fromMaya = IECoreMaya.FromMayaImageConverter( mImage )
imageB = fromMaya.convert()
self.assertFalse(
IECoreImage.ImageDiffOp()( imageA=imageA, imageB=imageB, maxError=1.0/256 ).value
)
if __name__ == "__main__":
IECoreMaya.TestProgram()
|
homeassistant/components/todoist/types.py | MrDelik/core | 30,023 | 3637 | <gh_stars>1000+
"""Types for the Todoist component."""
from __future__ import annotations
from typing import TypedDict
class DueDate(TypedDict):
"""Dict representing a due date in a todoist api response."""
date: str
is_recurring: bool
lang: str
string: str
timezone: str | None
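# Illustrative shape of the dict this TypedDict describes (values are made up):
#   {"date": "2021-01-01", "is_recurring": False, "lang": "en",
#    "string": "every day", "timezone": None}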
|
test/unit/data/model/mapping/common.py | quacksawbones/galaxy-1 | 1,085 | 3639 | from abc import ABC, abstractmethod
from contextlib import contextmanager
from uuid import uuid4
import pytest
from sqlalchemy import (
delete,
select,
UniqueConstraint,
)
class AbstractBaseTest(ABC):
@pytest.fixture
def cls_(self):
"""
Return class under test.
Assumptions: if the class under test is Foo, then the class grouping
the tests should be a subclass of BaseTest, named TestFoo.
"""
prefix = len("Test")
class_name = self.__class__.__name__[prefix:]
return getattr(self.get_model(), class_name)
@abstractmethod
def get_model(self):
pass
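    # Illustrative sketch of the naming convention described in cls_ (names are
    # hypothetical): a grouping class `TestFoo(...)` whose get_model() returns a
    # module exposing `Foo` makes cls_ resolve to that `Foo` class.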
def dbcleanup_wrapper(session, obj, where_clause=None):
with dbcleanup(session, obj, where_clause):
yield obj
@contextmanager
def dbcleanup(session, obj, where_clause=None):
"""
Use the session to store obj in database; delete from database on exit, bypassing the session.
If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct
a custom select statement.
"""
return_id = where_clause is None
try:
obj_id = persist(session, obj, return_id)
yield obj_id
finally:
table = obj.__table__
if where_clause is None:
where_clause = _get_default_where_clause(type(obj), obj_id)
stmt = delete(table).where(where_clause)
session.execute(stmt)
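# Illustrative usage sketch (assumes the calling test already has a SQLAlchemy
# `session` and a mapped instance `obj`; not part of the original module):
#
#   with dbcleanup(session, obj) as obj_id:
#       stored_obj = get_stored_obj(session, type(obj), obj_id)
#       assert stored_obj.id == obj_id
#   # on exit the row is deleted directly from the table, bypassing the session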
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id
def delete_from_database(session, objects):
"""
Delete each object in objects from database.
May be called at the end of a test if use of a context manager is impractical.
(Assume all objects have the id field as their primary key.)
"""
# Ensure we have a list of objects (check for list explicitly: a model can be iterable)
if not isinstance(objects, list):
objects = [objects]
for obj in objects:
table = obj.__table__
stmt = delete(table).where(table.c.id == obj.id)
session.execute(stmt)
def get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False):
# Either obj_id or where_clause must be provided, but not both
assert bool(obj_id) ^ (where_clause is not None)
if where_clause is None:
where_clause = _get_default_where_clause(cls, obj_id)
stmt = select(cls).where(where_clause)
result = session.execute(stmt)
# unique() is required if result contains joint eager loads against collections
# https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253
if unique:
result = result.unique()
return result.scalar_one()
def has_unique_constraint(table, fields):
for constraint in table.constraints:
if isinstance(constraint, UniqueConstraint):
col_names = {c.name for c in constraint.columns}
if set(fields) == col_names:
return True
def has_index(table, fields):
for index in table.indexes:
col_names = {c.name for c in index.columns}
if set(fields) == col_names:
return True
def collection_consists_of_objects(collection, *objects):
"""
Returns True iff list(collection) == list(objects), where object equality is determined
by primary key equality: object1.id == object2.id.
"""
if len(collection) != len(objects): # False if lengths are different
return False
if not collection: # True if both are empty
return True
# Sort, then compare each member by its 'id' attribute, which must be its primary key.
collection.sort(key=lambda item: item.id)
objects_l = list(objects)
objects_l.sort(key=lambda item: item.id)
for item1, item2 in zip(collection, objects_l):
if item1.id is None or item2.id is None or item1.id != item2.id:
return False
return True
def get_unique_value():
"""Generate unique values to accommodate unique constraints."""
return uuid4().hex
def _get_default_where_clause(cls, obj_id):
where_clause = cls.__table__.c.id == obj_id
return where_clause
|
src/front-door/azext_front_door/_validators.py | Mannan2812/azure-cli-extensions | 207 | 3673 | <reponame>Mannan2812/azure-cli-extensions<gh_stars>100-1000
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
def get_name_or_id_validator(dest, child_type=None, resource_type='Frontdoors', resource_namespace='Microsoft.Network',
resource_name_dest='front_door_name'):
def _validate_name_or_id(cmd, namespace):
from azure.cli.core.commands.client_factory import get_subscription_id
from msrestazure.tools import is_valid_resource_id, resource_id
subscription_id = get_subscription_id(cmd.cli_ctx)
resource_group = namespace.resource_group_name
names_or_ids = getattr(namespace, dest)
is_list = True
# treat single values as a list, but convert back in the end
if not isinstance(names_or_ids, list):
is_list = False
names_or_ids = [names_or_ids]
if names_or_ids == [None] or not names_or_ids:
return
ids = []
for val in names_or_ids:
id_params = {
'subscription': subscription_id,
'resource_group': resource_group,
'namespace': resource_namespace,
'type': resource_type,
'name': getattr(namespace, resource_name_dest) if child_type else val,
'child_type_1': child_type,
'child_name_1': val if child_type else None
}
if not is_valid_resource_id(val):
val = resource_id(**id_params)
ids.append(val)
setattr(namespace, dest, ids if is_list else ids[0])
return _validate_name_or_id
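# Illustrative sketch (hypothetical option): building a validator for a child resource so
# callers may pass either a bare name or a full resource ID; the `routing_rules` argument
# name used below is an assumption for illustration only.
#
#     validate_routing_rules = get_name_or_id_validator('routing_rules', 'routingRules')
#     # later wired up via: c.argument('routing_rules', validator=validate_routing_rules)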
def validate_waf_policy(cmd, namespace):
get_name_or_id_validator(
dest='waf_policy',
resource_type='WebApplicationFirewallPolicy'
)(cmd, namespace)
def validate_keyvault(cmd, namespace):
get_name_or_id_validator(
dest='vault',
resource_type='vaults',
resource_namespace='Microsoft.Keyvault'
)(cmd, namespace)
def validate_load_balancing_settings(cmd, namespace):
get_name_or_id_validator('load_balancing_settings', 'loadBalancingSettings')(cmd, namespace)
def validate_probe_settings(cmd, namespace):
get_name_or_id_validator('probe_settings', 'healthProbeSettings')(cmd, namespace)
def validate_frontend_endpoints(cmd, namespace):
get_name_or_id_validator('frontend_endpoints', 'frontendEndpoints')(cmd, namespace)
def validate_backend_pool(cmd, namespace):
get_name_or_id_validator('backend_pool', 'backendPools')(cmd, namespace)
def validate_rules_engine(cmd, namespace):
get_name_or_id_validator('rules_engine', 'rulesEngines')(cmd, namespace)
# pylint: disable=protected-access
class MatchConditionAction(argparse._AppendAction):
# pylint: disable=no-self-use
def parse_match_condition(self, values):
from azext_front_door.vendored_sdks.models import MatchCondition
if not isinstance(values, list):
values = values.split(' ')
try:
return MatchCondition(
match_variable=values[0],
operator=values[1],
match_value=values[2:]
)
except IndexError:
from knack.util import CLIError
raise CLIError('usage error: --match-condition VARIABLE OPERATOR [VALUE [VALUE ...]]')
def __call__(self, parser, namespace, values, option_string=None):
match_condition = self.parse_match_condition(values)
super(MatchConditionAction, self).__call__(parser, namespace, match_condition, option_string)
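# Illustrative sketch (assumed values): parse_match_condition turns the space-separated
# tokens of `--match-condition RequestHeader Contains foo bar` into a MatchCondition.
#
#     action = MatchConditionAction(option_strings=['--match-condition'], dest='match_conditions')
#     mc = action.parse_match_condition(['RequestHeader', 'Contains', 'foo', 'bar'])
#     # mc.match_variable == 'RequestHeader'; mc.operator == 'Contains'; mc.match_value == ['foo', 'bar']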
|
mermaid/utils.py | HastingsGreer/mermaid | 120 | 3691 | <gh_stars>100-1000
"""Various utility functions.
.. todo::
Reorganize this package in a more meaningful way.
"""
from __future__ import print_function
from __future__ import absolute_import
# from builtins import str
# from builtins import range
import torch
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from .libraries.modules.stn_nd import STN_ND_BCXYZ
from .data_wrapper import AdaptVal
from .data_wrapper import MyTensor
from . import smoother_factory as sf
from .data_wrapper import USE_CUDA
import numpy as np
from . import finite_differences as fd
import torch.nn as nn
import torch.nn.init as init
from . import module_parameters as pars
from .spline_interpolation import SplineInterpolation_ND_BCXYZ
import os
try:
from .libraries.functions.nn_interpolation import get_nn_interpolation
except ImportError:
print('WARNING: nn_interpolation could not be imported (only supported in CUDA at the moment). '
'Some functionality may not be available.')
def my_hasnan(x):
"""Check if any input elements are NaNs.
:param x: numpy array
:return: True if NaNs are present, False else
"""
return (x != x).any()
def create_symlink_with_correct_ext(sf, tf):
abs_s = os.path.abspath(sf)
ext_s = os.path.splitext(abs_s)[1]
abs_t = os.path.abspath(tf)
root_t,ext_t = os.path.splitext(abs_t)
abs_t_with_right_ext = root_t + ext_s
if os.path.isfile(abs_t_with_right_ext):
if os.path.samefile(abs_s,abs_t_with_right_ext):
# nothing to do here, these are already the same file
return
else:
os.remove(abs_t_with_right_ext)
# now we can do the symlink
os.symlink(abs_s,abs_t_with_right_ext)
def combine_dict(d1,d2):
"""Creates a dictionary which has entries from both of them.
:param d1: dictionary 1
:param d2: dictionary 2
:return: resulting dictionary
"""
d = d1.copy()
d.update(d2)
return d
def get_parameter_list_from_parameter_dict(pd):
"""Takes a dictionary which contains key value pairs for model parameters and converts it into a list of
parameters that can be used as an input to an optimizer.
:param pd: parameter dictionary
:return: list of parameters
"""
pl = []
for key in pd:
pl.append(pd[key])
return pl
def get_parameter_list_and_par_to_name_dict_from_parameter_dict(pd):
"""Same as get_parameter_list_from_parameter_dict; but also returns a dictionary which keeps track of the keys
based on memory id.
:param pd: parameter dictionary
:return: tuple of (parameter_list, name_dictionary)
"""
par_to_name_dict = dict()
pl = []
for key in pd:
pl.append(pd[key])
par_to_name_dict[pd[key]] = key
return pl, par_to_name_dict
def remove_infs_from_variable(v):
# 32 - bit floating point: torch.FloatTensor, torch.cuda.FloatTensor
# 64 - bit floating point: torch.DoubleTensor, torch.cuda.DoubleTensor
# 16 - bit floating point: torch.HalfTensor, torch.cuda.HalfTensor
# todo: maybe find a cleaner way of handling this
    # this is to make sure that subsequent sums work (hence the clamped range is smaller than it could be,
    # but values of this size should not occur in practice anyway)
sz = v.size()
reduction_factor = np.prod(np.array(sz))
if type(v.data) == torch.cuda.FloatTensor or v.data.dtype==torch.float32:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float32').min))/reduction_factor,
max=(np.asscalar(np.finfo('float32').max))/reduction_factor)
    elif v.data.dtype == torch.float64 or type(v.data) == torch.cuda.DoubleTensor:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float64').min))/reduction_factor,
max=(np.asscalar(np.finfo('float64').max))/reduction_factor)
    elif v.data.dtype == torch.float16 or type(v.data) == torch.cuda.HalfTensor:
return torch.clamp(v,
min=(np.asscalar(np.finfo('float16').min))/reduction_factor,
max=(np.asscalar(np.finfo('float16').max))/reduction_factor)
else:
raise ValueError('Unknown data type: ' + str( type(v.data)))
def lift_to_dimension(A, dim):
"""Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim > dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim == dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape))
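# Illustrative sketch (assumed shapes): lifting a 2D array to 4D prepends singleton
# dimensions, which is convenient before broadcasting against BxCxXxY data.
#
#     A = np.zeros([5, 7])
#     lift_to_dimension(A, 4).shape   # -> (1, 1, 5, 7)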
def get_dim_of_affine_transform(Ab):
"""Returns the number of dimensions corresponding to an affine transformation of the
form y=Ax+b stored in a column vector. For A =[a1,a2,a3], the parameter vector is simply
[a1;a2;a3;b], i.e., all columns stacked on top of each other.
:param Ab: parameter vector
:return: dimensionality of transform (1,2,or 3)
"""
nr = len(Ab)
if nr==2:
return 1
elif nr==6:
return 2
elif nr==12:
return 3
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity(Ab):
"""Sets the affine transformation as given by the column vector Ab to the identity transform.
:param Ab: Affine parameter vector (will be overwritten with the identity transform)
:return:
"""
dim = get_dim_of_affine_transform(Ab)
if dim==1:
Ab.zero_()
Ab[0]=1.
elif dim==2:
Ab.zero_()
Ab[0]=1.
Ab[3]=1.
elif dim==3:
Ab.zero_()
Ab[0]=1.
Ab[4]=1.
Ab[8]=1.
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
def set_affine_transform_to_identity_multiN(Ab):
"""Set the affine transforms to the identity (in the case of arbitrary batch size).
:param Ab: Parameter vectors B x pars (batch size x param. vector); will be overwritten with identity trans.
:return:
"""
sz = Ab.size()
nr_of_images = sz[0]
for nrI in range(nr_of_images):
set_affine_transform_to_identity(Ab[nrI, :])
def get_inverse_affine_param(Ab):
"""Computes inverse of affine transformation.
Formally: C(Ax+b)+d = CAx+Cb+d = x; C = inv(A), d = -Cb
:param Ab: B x pars (batch size x param. vector)
:return: Inverse of affine parameters
"""
dim =0
if Ab.shape[1] == 2:
dim = 1
elif Ab.shape[1] == 6:
dim = 2
elif Ab.shape[1] == 12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1,2)
Ab_inv = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_inv = torch.inverse(Ab[n, :, :dim])
Ab_inv[n, :, :dim] = tm_inv
Ab_inv[n, :, dim] = - torch.matmul(tm_inv, Ab[n,:,dim])
inv_affine_param = Ab_inv.transpose(1, 2).contiguous().view(Ab.shape[0], -1)
return inv_affine_param
def update_affine_param(Ab, Cd):
"""Update affine parameters.
Formally: C(Ax+b)+d = CAx+Cb+d
:param Ab: B x pars (batch size x param. vector)
:return: Updated affine parameters
"""
dim = 0
if Ab.shape[1]==2:
dim = 1
elif Ab.shape[1]==6:
dim = 2
elif Ab.shape[1]==12:
dim = 3
if dim not in [1, 2, 3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
Ab = Ab.view(Ab.shape[0], dim+1, dim).transpose(1, 2)
Cd = Cd.view(Cd.shape[0], dim+1, dim).transpose(1, 2)
updated_param = torch.zeros_like(Ab)
for n in range(Ab.shape[0]):
tm_param = torch.matmul(Cd[n,:,:dim],Ab[n,:,:dim])
updated_param[n,:,:dim] = tm_param
updated_param[n,:,dim] = torch.matmul(Cd[n,:,:dim], Ab[n,:,dim]) +Cd[n,:,dim]
updated_param = updated_param.transpose(1,2).contiguous().view(Ab.shape[0],-1)
return updated_param
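# Illustrative sketch (assumed 2D example, batch size 1): composing an affine transform
# with the parameters returned by get_inverse_affine_param should recover the identity.
#
#     Ab = MyTensor(1, 6).zero_()
#     set_affine_transform_to_identity_multiN(Ab)
#     Ab[0, 0] = 2.0                                  # scale the x-axis by 2
#     Ab_inv = get_inverse_affine_param(Ab)
#     composed = update_affine_param(Ab, Ab_inv)      # apply the inverse after Ab
#     # composed is (numerically) the identity parameter vector [1, 0, 0, 1, 0, 0]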
def apply_affine_transform_to_map(Ab,phi):
"""Applies an affine transform to a map.
:param Ab: affine transform parameter column vector
:param phi: map; format nrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed map
"""
sz = phi.size()
dim = len(sz) - 1
if dim not in [1,2,3]:
raise ValueError('Only supports dimensions 1, 2, and 3.')
phiR = MyTensor(sz).zero_().type_as(phi)
if dim == 1:
phiR = phi * Ab[0] + Ab[1]
elif dim == 2:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[2] * phi[1, ...] + Ab[4] # a_11x+a_21y+b1
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[5] # a_12x+a_22y+b2
elif dim == 3:
phiR[0, ...] = Ab[0] * phi[0, ...] + Ab[3] * phi[1, ...] + Ab[6] * phi[2, ...] + Ab[9]
phiR[1, ...] = Ab[1] * phi[0, ...] + Ab[4] * phi[1, ...] + Ab[7] * phi[2, ...] + Ab[10]
phiR[2, ...] = Ab[2] * phi[0, ...] + Ab[5] * phi[1, ...] + Ab[8] * phi[2, ...] + Ab[11]
else:
raise ValueError('Only supports dimensions 1, 2, and 3.')
return phiR
def apply_affine_transform_to_map_multiNC(Ab,phi):
"""Applies an affine transform to maps (for arbitrary batch size).
:param Ab: affine transform parameter column vectors (batch size x param. vector)
:param phi: maps; format batchxnrCxXxYxZ (nrC corresponds to dimension)
:return: returns transformed maps
"""
sz = phi.size()
dim = get_dim_of_affine_transform(Ab[0,:])
nr_of_images = Ab.size()[0]
if nr_of_images != sz[0]:
raise ValueError('Incompatible number of affine transforms')
if dim != len(sz)-2:
raise ValueError('Incompatible number of affine transforms')
phiR = MyTensor(sz).zero_().type_as(phi)
for nrI in range(nr_of_images):
phiR[nrI, ...] = apply_affine_transform_to_map(Ab[nrI, :], phi[nrI, ...])
return phiR
def compute_normalized_gaussian(X, mu, sig):
"""Computes a normalized Gaussian.
:param X: map with coordinates at which to evaluate
:param mu: array indicating the mean
:param sig: array indicating the standard deviations for the different dimensions
:return: Normalized Gaussian evaluated at coordinates in X
Example::
>>> mu, sig = [1,1], [1,1]
        >>> X = np.mgrid[0:3, 0:3]
        >>> print(compute_normalized_gaussian(X, mu, sig))
"""
dim = len(mu)
if dim == 1:
g = np.exp(-np.power(X[0, :] - mu[0], 2.)/(2*np.power(sig[0], 2.)))
g = g/g.sum()
return g
elif dim == 2:
g = np.exp(-np.power(X[0,:,:]-mu[0],2.)/(2*np.power(sig[0],2.))
- np.power(X[1,:, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.)))
g = g/g.sum()
return g
elif dim == 3:
g = np.exp(-np.power(X[0,:, :, :] - mu[0], 2.) / (2 * np.power(sig[0], 2.))
-np.power(X[1,:, :, :] - mu[1], 2.) / (2 * np.power(sig[1], 2.))
-np.power(X[2,:, :, :] - mu[2], 2.) / (2 * np.power(sig[2], 2.)))
g = g / g.sum()
return g
else:
raise ValueError('Can only compute Gaussians in dimensions 1-3')
def _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary=False,use_01_input=True):
if spline_order not in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]:
raise ValueError('Currently only orders 0 to 9 are supported')
if spline_order == 0:
# return get_warped_label_map(I0,phi,spacing)
stn = STN_ND_BCXYZ(spacing,
zero_boundary,
use_bilinear=False,
use_01_input=use_01_input)
elif spline_order == 1:
stn = STN_ND_BCXYZ(spacing,zero_boundary,
use_bilinear=True,
use_01_input=use_01_input)
else:
stn = SplineInterpolation_ND_BCXYZ(spacing,
spline_order)
I1_warped = stn(I0, phi)
return I1_warped
def compute_warped_image(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size XxYxZ
:param phi: map for the warping, size dimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size XxYxZ
"""
# implements this by creating a different view (effectively adding dimensions)
Iw = compute_warped_image_multiNC(I0.view(torch.Size([1, 1] + list(I0.size()))),
phi.view(torch.Size([1] + list(phi.size()))),
spacing,
spline_order,
zero_boundary,
use_01_input)
return Iw.view(I0.size())
def compute_warped_image_multiNC(I0, phi, spacing, spline_order, zero_boundary=False, use_01_input=True):
"""Warps image.
:param I0: image to warp, image size BxCxXxYxZ
:param phi: map for the warping, size BxdimxXxYxZ
:param spacing: image spacing [dx,dy,dz]
:return: returns the warped image of size BxCxXxYxZ
"""
dim = I0.dim()-2
if dim == 1:
return _compute_warped_image_multiNC_1d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 2:
return _compute_warped_image_multiNC_2d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
elif dim == 3:
return _compute_warped_image_multiNC_3d(I0, phi, spacing, spline_order,zero_boundary,use_01_input=use_01_input)
else:
raise ValueError('Images can only be warped in dimensions 1 to 3')
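# Illustrative sketch (assumed sizes): warping a batch of 2D images with the identity map
# should return (up to interpolation error) the input images.
#
#     sz = np.array([1, 1, 32, 32])
#     spacing = 1. / (sz[2:] - 1)
#     I = AdaptVal(torch.rand(*sz.tolist()))
#     phi = AdaptVal(torch.from_numpy(identity_map_multiN(sz, spacing)))
#     I_w = compute_warped_image_multiNC(I, phi, spacing, spline_order=1)
#     # I_w is approximately equal to I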
def _get_low_res_spacing_from_spacing(spacing, sz, lowResSize):
"""Computes spacing for the low-res parametrization from image spacing.
:param spacing: image spacing
:param sz: size of image
    :param lowResSize: size of low res parameterization
:return: returns spacing of low res parameterization
"""
#todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::])-1) / (np.array(lowResSize[2::])-1)
def _get_low_res_size_from_size(sz, factor):
"""Returns the corresponding low-res size from a (high-res) sz.
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None) or (factor >= 1):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return np.array(sz)
else:
low_res_sz = np.array(sz)
low_res_sz[2::] = (np.ceil((np.array(sz[2::]) * factor))).astype('int16')
return low_res_sz
def _compute_low_res_image(I, spacing, low_res_size, spline_order):
import mermaid.image_sampling as IS
sampler = IS.ResampleImage()
low_res_image, _ = sampler.downsample_image_to_size(I, spacing, low_res_size[2::],spline_order)
return low_res_image
def individual_parameters_to_model_parameters(ind_pars):
model_pars = dict()
if type(ind_pars) == type(dict()):
# should already be in the right format
model_pars = ind_pars
else:
# if ind_pars is not a dictionary assume that they come from the optimizer
# (i.e., list and each list element has a dictionary with keys 'name' and 'model_params'
for par in ind_pars:
model_pars[par['name']] = par['model_params']
return model_pars
def compute_vector_momentum_from_scalar_momentum_multiNC(lam, I, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, BxCxXxYxZ
:param I: image, BxCxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
nrOfI = sz[0] # number of images
    m = create_ND_vector_field_variable_multiN(sz[2::], nrOfI)  # note: the second dimension here is the image dimension (dim), not nrOfC
nrOfC = sz[1]
for c in range(nrOfC): # loop over all the channels and add the results
m = m + compute_vector_momentum_from_scalar_momentum_multiN(lam[:, c, ...],
I[:, c, ...],
nrOfI,
sz[2::],
spacing)
return m
def compute_vector_momentum_from_scalar_momentum_multiN(lam, I, nrOfI, sz, spacing):
"""Computes the vector momentum from the scalar momentum: :math:`m=\\lambda\\nabla I`.
:param lam: scalar momentum, batchxXxYxZ
    :param I: image, batchxXxYxZ
:param sz: size of image
:param spacing: spacing of image
:return: returns the vector momentum
"""
fdt = fd.FD_torch(spacing)
dim = len(sz)
m = create_ND_vector_field_variable_multiN(sz, nrOfI)
if dim == 1:
m[:, 0, :] = fdt.dXc(I)*lam
elif dim == 2:
m[:, 0, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :] = fdt.dYc(I)*lam
elif dim == 3:
m[:, 0, :, :, :] = fdt.dXc(I)*lam
m[:, 1, :, :, :] = fdt.dYc(I)*lam
m[:, 2, :, :, :] = fdt.dZc(I)*lam
else:
raise ValueError('Can only convert scalar to vector momentum in dimensions 1-3')
return m
def create_ND_vector_field_variable_multiN(sz, nr_of_images=1):
"""
Create vector field torch Variable of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
    :param nr_of_images: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nr_of_images, dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0., 1e-7)
def create_ND_vector_field_variable(sz):
"""Create vector field torch Variable of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:return: returns vector field of size dimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([dim]+list(csz))
return MyTensor(*(csz.tolist())).normal_(0.,1e-7)
def create_vector_parameter(nr_of_elements):
"""Creates a vector parameters with a specified number of elements.
:param nr_of_elements: number of vector elements
:return: returns the parameter vector
"""
return Parameter(MyTensor(nr_of_elements).normal_(0., 1e-7))
def create_ND_vector_field_parameter_multiN(sz, nrOfI=1,get_field_from_external_network=False):
"""Create vector field torch Parameter of given size.
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
dim = len(sz)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI, dim]+list(csz))
if get_field_from_external_network:
tmp = MyTensor(*(csz.tolist())).normal_(0.,1e-7)
tmp.requires_grad = True
else:
tmp = Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
return tmp
def create_local_filter_weights_parameter_multiN(sz,gaussian_std_weights, nrOfI=1,sched='w_K_w',get_preweight_from_network=False):
"""
Create vector field torch Parameter of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:return: returns vector field of size nrOfIxdimxXxYxZ
"""
nr_of_mg_weights = len(gaussian_std_weights)
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nr_of_mg_weights]+list(csz))
weights = torch.empty(*csz)
# set the default
if sched =='w_K_w':
gaussian_std_weights = [torch.sqrt(std_w) for std_w in gaussian_std_weights]
for g in range(nr_of_mg_weights):
weights[:, g, ...] = gaussian_std_weights[g]
tmp = AdaptVal(weights)
if get_preweight_from_network:
tmp.requires_grad = True
else:
tmp = Parameter(tmp)
return tmp
def create_ND_scalar_field_parameter_multiNC(sz, nrOfI=1, nrOfC=1):
"""
Create vector field torch Parameter of given size
:param sz: just the spatial sizes (e.g., [5] in 1D, [5,10] in 2D, [5,10,10] in 3D)
:param nrOfI: number of images
:param nrOfC: number of channels
:return: returns vector field of size nrOfIxnrOfCxXxYxZ
"""
csz = np.array(sz) # just to make sure it is a numpy array
csz = np.array([nrOfI,nrOfC]+list(csz))
return Parameter(MyTensor(*(csz.tolist())).normal_(0.,1e-7))
def centered_identity_map_multiN(sz, spacing, dtype='float32'):
"""
Create a centered identity map (shifted so it is centered around 0)
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz) - 2
nrOfI = sz[0]
if dim == 1:
id = np.zeros([nrOfI, 1, sz[2]], dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI, 2, sz[2], sz[3]], dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI, 3, sz[2], sz[3], sz[4]], dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n, ...] = centered_identity_map(sz[2::], spacing,dtype=dtype)
return id
def identity_map_multiN(sz,spacing,dtype='float32'):
"""
Create an identity map
:param sz: size of an image in BxCxXxYxZ format
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map
"""
dim = len(sz)-2
nrOfI = int(sz[0])
if dim == 1:
id = np.zeros([nrOfI,1,sz[2]],dtype=dtype)
elif dim == 2:
id = np.zeros([nrOfI,2,sz[2],sz[3]],dtype=dtype)
elif dim == 3:
id = np.zeros([nrOfI,3,sz[2],sz[3],sz[4]],dtype=dtype)
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
for n in range(nrOfI):
id[n,...] = identity_map(sz[2::],spacing,dtype=dtype)
return id
def centered_identity_map(sz, spacing, dtype='float32'):
"""
Returns a centered identity map (with 0 in the middle) if the sz is odd
Otherwise shifts everything by 0.5*spacing
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim == 1:
id = np.mgrid[0:sz[0]]
elif dim == 2:
id = np.mgrid[0:sz[0], 0:sz[1]]
elif dim == 3:
id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array(id.astype(dtype))
if dim == 1:
id = id.reshape(1, sz[0]) # add a dummy first index
for d in range(dim):
id[d] *= spacing[d]
if sz[d]%2==0:
#even
id[d] -= spacing[d]*(sz[d]//2)
else:
#odd
id[d] -= spacing[d]*((sz[d]+1)//2)
# and now store it in a dim+1 array
if dim == 1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0, :] = id[0]
elif dim == 2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0, :, :] = id[0]
idnp[1, :, :] = id[1]
elif dim == 3:
idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0, :, :, :] = id[0]
idnp[1, :, :, :] = id[1]
idnp[2, :, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
return idnp
#
# def centered_min_normalized_identity_map(sz, spacing, dtype='float32'):
# """
# Returns a centered identity map (with 0 in the middle) if the sz is odd
# Otherwise shifts everything by 0.5*spacing
#
# :param sz: just the spatial dimensions, i.e., XxYxZ
# :param spacing: list with spacing information [sx,sy,sz]
# :param dtype: numpy data-type ('float32', 'float64', ...)
# :return: returns the identity map of dimension dimxXxYxZ
# """
# dim = len(sz)
# if dim == 1:
# id = np.mgrid[0:sz[0]]
# elif dim == 2:
# id = np.mgrid[0:sz[0], 0:sz[1]]
# elif dim == 3:
# id = np.mgrid[0:sz[0], 0:sz[1], 0:sz[2]]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
#
# min_spacing = np.min(spacing)
# spacing_ratio = spacing/min_spacing
#
#
# # now get it into range [0,(sz-1)*spacing]^d
# id = np.array(id.astype(dtype))
# if dim == 1:
# id = id.reshape(1, sz[0]) # add a dummy first index
#
# for d in range(dim):
# id[d] *= spacing[d]
# if sz[d]%2==0:
# #even
# id[d] -= spacing[d]*(sz[d]//2)
# else:
# #odd
# id[d] -= spacing[d]*((sz[d]+1)//2)
#
# # and now store it in a dim+1 array and rescale by the ratio
# if dim == 1:
# idnp = np.zeros([1, sz[0]], dtype=dtype)
# idnp[0, :] = id[0] * spacing_ratio[0]
# elif dim == 2:
# idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
# idnp[0, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :] = id[1] * spacing_ratio[1]
# elif dim == 3:
# idnp = np.zeros([3, sz[0], sz[1], sz[2]], dtype=dtype)
# idnp[0, :, :, :] = id[0] * spacing_ratio[0]
# idnp[1, :, :, :] = id[1] * spacing_ratio[1]
# idnp[2, :, :, :] = id[2] * spacing_ratio[2]
# else:
# raise ValueError('Only dimensions 1-3 are currently supported for the centered identity map')
#
# return idnp
#
# def tranfrom_var_list_into_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =min_spacing/spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
# def recover_var_list_from_min_normalized_space(var_list,spacing,do_transform=True):
# if do_transform:
# min_spacing = np.min(spacing)
# spacing_ratio =spacing/min_spacing
# dim = spacing.size
# spacing_ratio_t = AdaptVal(torch.Tensor(spacing_ratio))
# sp_sz = [1]+[dim] +[1]*dim
# spacing_ratio_t = spacing_ratio_t.view(*sp_sz)
# new_var_list = [var*spacing_ratio_t if var is not None else None for var in var_list]
# else:
# new_var_list = var_list
# return new_var_list
#
def identity_map(sz,spacing,dtype='float32'):
"""
Returns an identity map.
:param sz: just the spatial dimensions, i.e., XxYxZ
:param spacing: list with spacing information [sx,sy,sz]
:param dtype: numpy data-type ('float32', 'float64', ...)
:return: returns the identity map of dimension dimxXxYxZ
"""
dim = len(sz)
if dim==1:
id = np.mgrid[0:sz[0]]
elif dim==2:
id = np.mgrid[0:sz[0],0:sz[1]]
elif dim==3:
id = np.mgrid[0:sz[0],0:sz[1],0:sz[2]]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
# now get it into range [0,(sz-1)*spacing]^d
id = np.array( id.astype(dtype) )
if dim==1:
id = id.reshape(1,sz[0]) # add a dummy first index
for d in range(dim):
id[d]*=spacing[d]
#id[d]*=2./(sz[d]-1)
#id[d]-=1.
# and now store it in a dim+1 array
if dim==1:
idnp = np.zeros([1, sz[0]], dtype=dtype)
idnp[0,:] = id[0]
elif dim==2:
idnp = np.zeros([2, sz[0], sz[1]], dtype=dtype)
idnp[0,:, :] = id[0]
idnp[1,:, :] = id[1]
elif dim==3:
idnp = np.zeros([3,sz[0], sz[1], sz[2]], dtype=dtype)
idnp[0,:, :, :] = id[0]
idnp[1,:, :, :] = id[1]
idnp[2,:, :, :] = id[2]
else:
raise ValueError('Only dimensions 1-3 are currently supported for the identity map')
return idnp
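# Illustrative sketch (assumed inputs): the identity map stores physical coordinates, so
# for sz=[3] and spacing=[0.5] the single spatial channel is [0.0, 0.5, 1.0].
#
#     identity_map(np.array([3]), np.array([0.5]))
#     # -> array([[0. , 0.5, 1. ]], dtype=float32)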
def omt_boundary_weight_mask(img_sz,spacing,mask_range=5,mask_value=5,smoother_std =0.05):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.ones(*mask_sz))*mask_value
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
return mask.detach()
def momentum_boundary_weight_mask(img_sz,spacing,mask_range=5,smoother_std =0.05,pow=2):
"""generate a smooth weight mask for the omt """
dim = len(img_sz)
mask_sz = [1,1]+ list(img_sz)
mask = AdaptVal(torch.zeros(*mask_sz))
if dim ==2:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range]=1
elif dim==3:
mask[:,:,mask_range:-mask_range,mask_range:-mask_range,mask_range:-mask_range ]=1
sm = get_single_gaussian_smoother(smoother_std,img_sz,spacing)
mask = sm.smooth(mask)
if pow ==2:
mask = mask**2
if pow ==3:
mask = mask*mask*mask
return mask
# def compute_omt_const(stds,param,dim):
# omt_power = param['forward_model']['smoother']['omt_power']
# omt_weight_penalty = param['forward_model']['smoother']['omt_weight_penalty']
# min_std = torch.min(stds)
# max_std = torch.max(stds)
# omt_const = torch.abs(torch.log(max_std/stds))**omt_power
# omt_const = omt_const/(torch.abs(torch.log(max_std / min_std)) ** omt_power)
# omt_const = omt_const*omt_weight_penalty/(EV.reg_factor_in_mermaid*2)
# sz = [1]+ [len(stds)] +[1]*(dim+1)
# return omt_const.view(*sz)
def get_single_gaussian_smoother(gaussian_std,sz,spacing):
s_m_params = pars.ParameterDict()
s_m_params['smoother']['type'] = 'gaussian'
s_m_params['smoother']['gaussian_std'] = gaussian_std
s_m = sf.SmootherFactory(sz, spacing).create_smoother(s_m_params)
return s_m
def get_warped_label_map(label_map, phi, spacing, sched='nn'):
if sched == 'nn':
warped_label_map = compute_warped_image_multiNC(label_map, phi, spacing,spline_order=0,zero_boundary=True)
    # TODO: check whether this assert should be kept here
assert abs(torch.sum(warped_label_map.data -warped_label_map.data.round()))< 0.1, "nn interpolation is not precise"
else:
raise ValueError(" the label warping method is not implemented")
return warped_label_map
def t2np(v):
"""
Takes a torch array and returns it as a numpy array on the cpu
:param v: torch array
:return: numpy array
"""
return (v.detach()).cpu().numpy()
def cxyz_to_xyzc( v ):
"""
    Takes a torch array in channel-first (BxCxXxYxZ) layout and permutes it to channel-last (BxXxYxZxC) layout
    :param v: torch array, channel-first
    :return: torch array, channel-last
"""
dim = len(v.shape)-2
if dim ==2:
v = v.permute(0,2,3,1)
if dim ==3:
v = v.permute(0,2,3,4,1)
return v
def get_scalar(v):
if isinstance(v, float):
return v
elif isinstance(v, np.ndarray) and v.size == 1:
return float(v)
def checkNan(x):
""""
input should be list of Variable
"""
return [len(np.argwhere(np.isnan(elem.detach().cpu().numpy()))) for elem in x]
def noramlized_spacing_to_smallest(spacing):
min_sp = np.min(spacing)
spacing[spacing>min_sp]=min_sp
return spacing
def time_warped_function(f):
def __time_warped_function(input=None):
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start.record()
output = f(input)
end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
print(start.elapsed_time(end))
return output
return __time_warped_function
def interoplate_boundary_right(tensor):
dim = len(tensor.shape)-2
if dim==1:
        tensor[:, :, -1] = tensor[:, :, -2] + tensor[:, :, -2] - tensor[:, :, -3]
if dim==2:
tensor[:, :, -1,:] = tensor[:, :,-2,:] + tensor[:, :,-2,:] - tensor[:, :,-3,:]
tensor[:, :, :,-1] = tensor[:, :, :,-2] + tensor[:, :, :,-2] - tensor[:, :, :,-3]
if dim==3:
        tensor[:, :, -1, :, :] = tensor[:, :, -2, :, :] + tensor[:, :, -2, :, :] - tensor[:, :, -3, :, :]
        tensor[:, :, :, -1, :] = tensor[:, :, :, -2, :] + tensor[:, :, :, -2, :] - tensor[:, :, :, -3, :]
        tensor[:, :, :, :, -1] = tensor[:, :, :, :, -2] + tensor[:, :, :, :, -2] - tensor[:, :, :, :, -3]
def get_resampled_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
:param I: B C X Y Z
:param spacing: spx spy spz
:param desiredSize: B C X Y Z
:param spline_order:
:param zero_boundary:
:param identity_map:
:return:
"""
if spacing is None:
img_sz = I.shape[2:]
spacing = 1. / (np.array(img_sz) - 1)
if identity_map is not None: # todo will remove, currently fix for symmetric training
if I.shape[0] != identity_map.shape[0]:
n_batch = I.shape[0]
desiredSize = desiredSize.copy()
desiredSize[0] = n_batch
identity_map = identity_map[:n_batch]
resampled, new_spacing = resample_image(I, spacing, desiredSize, spline_order=spline_order,
zero_boundary=zero_boundary, identity_map=identity_map)
return resampled
def resample_image(I, spacing, desiredSize, spline_order=1, zero_boundary=False, identity_map=None):
"""
Resample an image to a given desired size
:param I: Input image (expected to be of BxCxXxYxZ format)
:param spacing: array describing the spatial spacing
:param desiredSize: array for the desired size (excluding B and C, i.e, 1 entry for 1D, 2 for 2D, and 3 for 3D)
:return: returns a tuple: the downsampled image, the new spacing after downsampling
"""
desiredSize = desiredSize[2:]
is_numpy = False
if not isinstance(I, torch.Tensor):
I = torch.Tensor(I)
is_numpy = True
sz = np.array(list(I.size()))
# check that the batch size and the number of channels is the same
nrOfI = sz[0]
nrOfC = sz[1]
desiredSizeNC = np.array([nrOfI, nrOfC] + list(desiredSize))
    newspacing = spacing * ((sz[2::].astype('float') - 1.) / (desiredSizeNC[2::].astype('float') - 1.))
if identity_map is not None:
idDes = identity_map
else:
idDes = AdaptVal(torch.from_numpy(identity_map_multiN(desiredSizeNC, newspacing)))
# now use this map for resampling
ID = compute_warped_image_multiNC(I, idDes, newspacing, spline_order, zero_boundary)
return ID if not is_numpy else ID.numpy(), newspacing
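# Illustrative sketch (assumed sizes): downsampling a 1x1x64x64 image to 32x32; the
# returned spacing grows accordingly (here by a factor of 63/31).
#
#     I = torch.rand(1, 1, 64, 64)
#     spacing = np.array([1. / 63, 1. / 63])
#     I_lr, spacing_lr = resample_image(I, spacing, np.array([1, 1, 32, 32]), spline_order=1)
#     # I_lr.shape == (1, 1, 32, 32)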
def get_res_size_from_size(sz, factor):
"""
Returns the corresponding low-res size from a (high-res) sz
:param sz: size (high-res)
:param factor: low-res factor (needs to be <1)
:return: low res size
"""
if (factor is None):
print('WARNING: Could not compute low_res_size as factor was ' + str(factor))
return sz
else:
lowResSize = np.array(sz)
if not isinstance(factor, list):
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * factor))).astype('int16')
else:
lowResSize[2::] = (np.ceil((np.array(sz[2:]) * np.array(factor)))).astype('int16')
if lowResSize[-1] % 2 != 0:
lowResSize[-1] -= 1
print(
'\n\nWARNING: forcing last dimension to be even: fix properly in the Fourier transform later!\n\n')
return lowResSize
def get_res_spacing_from_spacing(spacing, sz, lowResSize):
"""
Computes spacing for the low-res parameterization from image spacing
:param spacing: image spacing
:param sz: size of image
    :param lowResSize: size of low res parameterization
:return: returns spacing of low res parameterization
"""
# todo: check that this is the correct way of doing it
return spacing * (np.array(sz[2::]) - 1) / (np.array(lowResSize[2::]) - 1)
########################################## Adaptive Net ###################################################3
def space_normal(tensors, std=0.1):
"""
space normalize for the net kernel
:param tensor:
:param mean:
:param std:
:return:
"""
if isinstance(tensors, Variable):
space_normal(tensors.data, std=std)
return tensors
for n in range(tensors.size()[0]):
for c in range(tensors.size()[1]):
dim = tensors[n][c].dim()
sz = tensors[n][c].size()
mus = np.zeros(dim)
stds = std * np.ones(dim)
print('WARNING: What should the spacing be here? Needed for new identity map code')
raise ValueError('Double check the spacing here before running this code')
spacing = np.ones(dim)
centered_id = centered_identity_map(sz,spacing)
g = compute_normalized_gaussian(centered_id, mus, stds)
tensors[n,c] = torch.from_numpy(g)
def weights_init_uniform(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.uniform(m.weight.data, 0.038, 0.042)
elif classname.find('Linear') != -1:
init.uniform(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
space_normal(m.weight.data)
elif classname.find('Linear') != -1:
space_normal(m.weight.data)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_rd_normal(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.normal(m.weight.data)
elif classname.find('Linear') != -1:
init.normal(m.weight.data)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_xavier(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.xavier_normal(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_kaiming(m):
classname = m.__class__.__name__
# print(classname)
if classname.find('Conv') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('Linear') != -1:
init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def weights_init_orthogonal(m):
classname = m.__class__.__name__
print(classname)
if classname.find('Conv') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('Linear') != -1:
init.orthogonal(m.weight.data, gain=1)
elif classname.find('BatchNorm2d') != -1:
init.uniform(m.weight.data, 1.0, 0.02)
init.constant(m.bias.data, 0.0)
def init_weights(net, init_type='normal'):
print('initialization method [%s]' % init_type)
if init_type == 'rd_normal':
net.apply(weights_init_rd_normal)
elif init_type == 'normal':
net.apply(weights_init_normal)
elif init_type == 'uniform':
net.apply(weights_init_uniform)
elif init_type == 'xavier':
net.apply(weights_init_xavier)
elif init_type == 'kaiming':
net.apply(weights_init_kaiming)
elif init_type == 'orthogonal':
net.apply(weights_init_orthogonal)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
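# Illustrative sketch (assumed network): applying one of the initialization schemes above
# to every matching Conv/Linear module of a model.
#
#     net = nn.Sequential(nn.Conv2d(2, 20, 5), nn.ReLU(), nn.Conv2d(20, 2, 5))
#     init_weights(net, init_type='kaiming')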
def organize_data(moving, target, sched='depth_concat'):
if sched == 'depth_concat':
input = torch.cat([moving, target], dim=1)
elif sched == 'width_concat':
input = torch.cat((moving, target), dim=3)
elif sched == 'list_concat':
input = torch.cat((moving.unsqueeze(0),target.unsqueeze(0)),dim=0)
elif sched == 'difference':
input = moving-target
return input
def bh(m,gi,go):
print("Grad Input")
print((torch.sum(gi[0].data), torch.sum(gi[1].data)))
print("Grad Output")
print(torch.sum(go[0].data))
return gi[0], gi[1], gi[2]
class ConvBnRel(nn.Module):
# conv + bn (optional) + relu
def __init__(self, in_channels, out_channels, kernel_size, stride=1, active_unit='relu', same_padding=False,
bn=False, reverse=False, bias=False):
super(ConvBnRel, self).__init__()
padding = int((kernel_size - 1) // 2) if same_padding else 0
if not reverse:
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding, bias=bias)
else:
self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding=padding,bias=bias)
#y = \frac{x - mean[x]}{ \sqrt{Var[x] + \epsilon}} * gamma + beta
#When affine=False the output of BatchNorm is equivalent to considering gamma=1 and beta=0 as constants.
self.bn = nn.BatchNorm2d(out_channels, eps=0.0001, momentum=0, affine=True) if bn else None
if active_unit == 'relu':
self.active_unit = nn.ReLU(inplace=True)
elif active_unit == 'elu':
self.active_unit = nn.ELU(inplace=True)
else:
self.active_unit = None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.active_unit is not None:
x = self.active_unit(x)
return x
class FcRel(nn.Module):
    # fc + relu (optional)
def __init__(self, in_features, out_features, active_unit='relu'):
super(FcRel, self).__init__()
self.fc = nn.Linear(in_features, out_features)
if active_unit == 'relu':
self.active_unit = nn.ReLU(inplace=True)
elif active_unit == 'elu':
self.active_unit = nn.ELU(inplace=True)
else:
self.active_unit = None
def forward(self, x):
x = self.fc(x)
if self.active_unit is not None:
x = self.active_unit(x)
return x
class AdpSmoother(nn.Module):
"""
    a simple convolutional implementation that generates a displacement field
"""
def __init__(self, inputs, dim, net_sched=None):
# settings should include [using_bias, using bn, using elu]
# inputs should be a dictionary could contain ['s'],['t']
super(AdpSmoother, self).__init__()
self.dim = dim
self.net_sched = 'm_only'
self.s = inputs['s'].detach()
self.t = inputs['t'].detach()
self.mask = Parameter(torch.cat([torch.ones(inputs['s'].size())]*dim, 1), requires_grad = True)
self.get_net_sched()
#self.net.register_backward_hook(bh)
def get_net_sched(self, debugging=True, using_bn=True, active_unit='relu', using_sigmoid=False , kernel_size=5):
# return the self.net and self.net_input
padding_size = (kernel_size-1)//2
if self.net_sched == 'm_only':
if debugging:
self.net = nn.Conv2d(2, 2, kernel_size, 1, padding=padding_size, bias=False,groups=2)
else:
net = \
[ConvBnRel(self.dim, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20,self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched =='m_f_s':
if debugging:
self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim +1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_d_s':
if debugging:
self.net = nn.Conv2d(self.dim+1, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 1, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_f_s_t':
if debugging:
self.net = nn.Conv2d(self.dim+2, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
elif self.net_sched == 'm_d_s_f_t':
if debugging:
self.net = nn.Conv2d(self.dim + 2, self.dim, kernel_size, 1, padding=padding_size, bias=False)
else:
net = \
[ConvBnRel(self.dim + 2, 20, 5, active_unit=active_unit, same_padding=True, bn=using_bn),
ConvBnRel(20, self.dim, 5, active_unit=active_unit, same_padding=True, bn=using_bn)]
if using_sigmoid:
net += [nn.Sigmoid()]
self.net = nn.Sequential(*net)
def prepare_data(self, m, new_s):
input=None
if self.net_sched == 'm_only':
input = m
elif self.net_sched == 'm_f_s':
input = organize_data(m,self.s,sched='depth_concat')
elif self.net_sched == 'm_d_s':
input = organize_data(m, new_s, sched='depth_concat')
elif self.net_sched == 'm_f_s_t':
input = organize_data(m, self.s, sched='depth_concat')
input = organize_data(input, self.t, sched='depth_concat')
elif self.net_sched == 'm_d_s_f_t':
input = organize_data(m, new_s, sched='depth_concat')
input = organize_data(input, self.t, sched='depth_concat')
return input
def forward(self, m,new_s=None):
m = m * self.mask
input = self.prepare_data(m,new_s)
x= input
x = self.net(x)
return x
|
test/Fortran/fixture/myfortran_flags.py | moroten/scons | 1,403 | 3697 | <reponame>moroten/scons<gh_stars>1000+
import getopt
import sys
comment = ('#' + sys.argv[1]).encode()
opts, args = getopt.getopt(sys.argv[2:], 'cf:o:xy')
optstring = ''
length = len(comment)
for opt, arg in opts:
if opt == '-o': out = arg
elif opt not in ('-f', '-K'): optstring = optstring + ' ' + opt
infile = open(args[0], 'rb')
outfile = open(out, 'wb')
outfile.write((optstring + "\n").encode())
for l in infile.readlines():
if l[:length] != comment:
outfile.write(l)
sys.exit(0)
|
loss_fn/classification_loss_fns/binary_cross_entropy.py | apple/ml-cvnets | 209 | 3710 | <filename>loss_fn/classification_loss_fns/binary_cross_entropy.py
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from torch.nn import functional as F
from torch import Tensor
import argparse
from . import register_classification_loss_fn
from .. import BaseCriteria
@register_classification_loss_fn(name="binary_cross_entropy")
class ClsBinaryCrossEntropy(BaseCriteria):
"""Binary CE for classification tasks"""
def __init__(self, opts, *args, **kwargs) -> None:
super().__init__()
def forward(
self, input_sample: Tensor, prediction: Tensor, target: Tensor, *args, **kwargs
) -> Tensor:
if target.dim() != prediction.dim():
target = F.one_hot(target, num_classes=prediction.shape[-1])
return F.binary_cross_entropy_with_logits(
input=prediction,
target=target.to(prediction.dtype),
weight=None,
reduction="sum",
)
def __repr__(self) -> str:
return "{}()".format(self.__class__.__name__)
|
dnnlib/submission/submit.py | gperdrizet/gansformer | 1,172 | 3717 | <reponame>gperdrizet/gansformer
# Submit a function to be run either locally or in a computing cluster.
# Compared to original StyleGAN implementation, we extend the support for automatic training resumption,
# and network recompilation.
import copy
import inspect
import os
import pathlib
import pickle
import platform
import pprint
import re
import shutil
import sys
import time
import traceback
from enum import Enum
from .. import util
from ..util import EasyDict
from . import internal
class SubmitTarget(Enum):
# The target where the function should be run
# LOCAL: Run it locally
LOCAL = 1
class PathType(Enum):
# Determines in which format should a path be formatted
# WINDOWS: Format with Windows style
# LINUX: Format with Linux/Posix style
# AUTO: Use current OS type to select either WINDOWS or LINUX
WINDOWS = 1
LINUX = 2
AUTO = 3
class PlatformExtras:
# A mixed bag of values used by dnnlib heuristics
# Attributes:
# data_reader_buffer_size: Used by DataReader to size internal shared memory buffers
# data_reader_process_count: Number of worker processes to spawn (zero for single
# thread operation)
def __init__(self):
self.data_reader_buffer_size = 1<<30 # 1 GB
self.data_reader_process_count = 0 # single threaded default
_user_name_override = None
class SubmitConfig(util.EasyDict):
# Strongly typed config dict needed to submit runs
# Attributes:
# run_dir_root: Path to the run dir root. Can be optionally templated with tags
# Needs to always be run through get_path_from_template
# run_desc: Description of the run. Will be used in the run dir and task name
# run_dir_ignore: List of file patterns used to ignore files when copying files to the run dir
# run_dir_extra_files: List of (abs_path, rel_path) tuples of file paths. rel_path root will
# be the src directory inside the run dir
# submit_target: Submit target enum value. Used to select where the run is actually launched
# num_gpus: Number of GPUs used/requested for the run
# print_info: Whether to print debug information when submitting
# local.do_not_copy_source_files: Do not copy source files from the working directory to the
# run dir.
# run_id: Automatically populated value during submit
# run_name: Automatically populated value during submit
# run_dir: Automatically populated value during submit
# run_func_name: Automatically populated value during submit
# run_func_kwargs: Automatically populated value during submit
# user_name: Automatically populated value during submit. Can be set by the user which will then
# override the automatic value
# task_name: Automatically populated value during submit
# host_name: Automatically populated value during submit
# platform_extras: Automatically populated values during submit. Used by various dnnlib libraries
# such as the DataReader class
def __init__(self):
super().__init__()
# run (set these)
self.run_dir_root = "" # should always be passed through get_path_from_template
self.run_desc = ""
self.run_dir_ignore = ["__pycache__", "*.pyproj", "*.sln", "*.suo", ".cache", ".idea", ".vs",
".vscode", "_cudacache"]
self.run_dir_extra_files = []
# submit (set these)
self.submit_target = SubmitTarget.LOCAL
self.num_gpus = 1
self.print_info = False
self.nvprof = False
self.local = internal.local.TargetOptions()
self.datasets = []
# (automatically populated)
self.run_id = None
self.run_name = None
self.run_dir = None
self.run_func_name = None
self.run_func_kwargs = None
self.user_name = None
self.task_name = None
self.host_name = "localhost"
self.platform_extras = PlatformExtras()
def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:
# Replace tags in the given path template and return either Windows or Linux formatted path
# automatically select path type depending on running OS
if path_type == PathType.AUTO:
if platform.system() == "Windows":
path_type = PathType.WINDOWS
elif platform.system() == "Linux":
path_type = PathType.LINUX
else:
raise RuntimeError("Unknown platform")
path_template = path_template.replace("<USERNAME>", get_user_name())
# return correctly formatted path
if path_type == PathType.WINDOWS:
return str(pathlib.PureWindowsPath(path_template))
elif path_type == PathType.LINUX:
return str(pathlib.PurePosixPath(path_template))
else:
raise RuntimeError("Unknown platform")
def get_template_from_path(path: str) -> str:
# Convert a normal path back to its template representation
path = path.replace("\\", "/")
return path
def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:
# Convert a normal path to template and the convert it back to a normal path with given path type
path_template = get_template_from_path(path)
path = get_path_from_template(path_template, path_type)
return path
def set_user_name_override(name: str) -> None:
# Set the global username override value
global _user_name_override
_user_name_override = name
def get_user_name():
# Get the current user name
if _user_name_override is not None:
return _user_name_override
elif platform.system() == "Windows":
return os.getlogin()
elif platform.system() == "Linux":
try:
import pwd
return pwd.getpwuid(os.geteuid()).pw_name
except:
return "unknown"
else:
raise RuntimeError("Unknown platform")
def make_run_dir_path(*paths):
# Make a path/filename that resides under the current submit run_dir
# Args:
# *paths: Path components to be passed to os.path.join
# Returns:
# A file/dirname rooted at submit_config.run_dir. If there's no
# submit_config or run_dir, the base directory is the current
# working directory.
# E.g., `os.path.join(dnnlib.submit_config.run_dir, "output.txt"))`
import dnnlib
if (dnnlib.submit_config is None) or (dnnlib.submit_config.run_dir is None):
return os.path.join(os.getcwd(), *paths)
return os.path.join(dnnlib.submit_config.run_dir, *paths)
def _create_run_dir_local(submit_config: SubmitConfig, resume: bool, create_new: bool) -> str:
# Create a new run dir with increasing ID number at the start
run_dir_root = get_path_from_template(submit_config.run_dir_root, PathType.AUTO)
if not os.path.exists(run_dir_root):
os.makedirs(run_dir_root)
run_dir = os.path.join(run_dir_root, submit_config.run_name)
if not resume:
if os.path.exists(run_dir) and create_new:
raise RuntimeError("The run dir already exists! ({0})".format(run_dir))
if not os.path.exists(run_dir):
os.makedirs(run_dir)
return run_dir
def _get_next_run_id_local(run_dir_root: str) -> int:
# Reads all directory names in a given directory (non-recursive) and returns the next (increasing) run id
# Assumes IDs are numbers at the start of the directory names
dir_names = [d for d in os.listdir(run_dir_root) if os.path.isdir(os.path.join(run_dir_root, d))]
r = re.compile("^\\d+") # match one or more digits at the start of the string
run_id = 0
for dir_name in dir_names:
m = r.match(dir_name)
if m is not None:
i = int(m.group())
run_id = max(run_id, i + 1)
return run_id
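# Illustrative sketch (assumed directory layout): with existing run dirs named
# "00000-sgan-ffhq" and "00003-sgan-cars", the next id is 4.
#
#     _get_next_run_id_local("results")   # -> 4 for the layout described above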
def _populate_run_dir(submit_config: SubmitConfig, run_dir: str) -> None:
# Copy all necessary files into the run dir. Assumes that the dir exists, is local, and is writable
pickle.dump(submit_config, open(os.path.join(run_dir, "submit_config.pkl"), "wb"))
with open(os.path.join(run_dir, "submit_config.txt"), "w") as f:
pprint.pprint(submit_config, stream = f, indent = 4, width = 200, compact = False)
if (submit_config.submit_target == SubmitTarget.LOCAL) and submit_config.local.do_not_copy_source_files:
return
files = []
run_func_module_dir_path = util.get_module_dir_by_obj_name(submit_config.run_func_name)
assert "." in submit_config.run_func_name
for _idx in range(submit_config.run_func_name.count(".") - 1):
run_func_module_dir_path = os.path.dirname(run_func_module_dir_path)
files += util.list_dir_recursively_with_ignore(run_func_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = False)
dnnlib_module_dir_path = util.get_module_dir_by_obj_name("dnnlib")
files += util.list_dir_recursively_with_ignore(dnnlib_module_dir_path, ignores = submit_config.run_dir_ignore, add_base_to_relative = True)
files += submit_config.run_dir_extra_files
files = [(f[0], os.path.join(run_dir, "src", f[1])) for f in files]
files += [(os.path.join(dnnlib_module_dir_path, "submission", "internal", "run.py"), os.path.join(run_dir, "run.py"))]
util.copy_files_and_create_dirs(files)
def run_wrapper(submit_config: SubmitConfig) -> None:
# Wrap the actual run function call for handling logging, exceptions, typing, etc
is_local = submit_config.submit_target == SubmitTarget.LOCAL
# when running locally, redirect stderr to stdout, log stdout to a file, and force flushing
if is_local:
logger = util.Logger(file_name = os.path.join(submit_config.run_dir, "log.txt"), file_mode="a", should_flush = True)
else: # when running in a cluster, redirect stderr to stdout, and just force flushing (log writing is handled by run.sh)
logger = util.Logger(file_name = None, should_flush = True)
import dnnlib
dnnlib.submit_config = submit_config
exit_with_errcode = False
try:
print("dnnlib: Running {0}() on {1}...".format(submit_config.run_func_name, submit_config.host_name))
start_time = time.time()
run_func_obj = util.get_obj_by_name(submit_config.run_func_name)
assert callable(run_func_obj)
sig = inspect.signature(run_func_obj)
if "submit_config" in sig.parameters:
run_func_obj(submit_config = submit_config, **submit_config.run_func_kwargs)
else:
run_func_obj(**submit_config.run_func_kwargs)
print("dnnlib: Finished {0}() in {1}.".format(submit_config.run_func_name, util.format_time(time.time() - start_time)))
except:
if is_local:
raise
else:
traceback.print_exc()
log_src = os.path.join(submit_config.run_dir, "log.txt")
log_dst = os.path.join(get_path_from_template(submit_config.run_dir_root), "{0}-error.txt".format(submit_config.run_name))
shutil.copyfile(log_src, log_dst)
# Defer sys.exit(1) to happen after we close the logs and create a _finished.txt
exit_with_errcode = True
finally:
open(os.path.join(submit_config.run_dir, "_finished.txt"), "w").close()
dnnlib.RunContext.get().close()
dnnlib.submit_config = None
logger.close()
# If we hit an error, get out of the script now and signal the error
# to whatever process that started this script.
if exit_with_errcode:
sys.exit(1)
return submit_config
def open_file_or_url(file_or_url):
if util.is_url(file_or_url):
return util.open_url(file_or_url, cache_dir = ".stylegan2-cache")
return open(file_or_url, "rb")
def load_pkl(file_or_url):
with open_file_or_url(file_or_url) as file:
return pickle.load(file, encoding = "latin1")
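# Illustrative usage (hedged; the URL below is hypothetical):
#   networks = load_pkl("https://example.com/pretrained-network.pkl")
# The "latin1" encoding allows pickles written under Python 2 to load under Python 3.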
def submit_run(submit_config: SubmitConfig, run_func_name: str, create_newdir: bool = False,
resume: bool = False, load_config: bool = False, **run_func_kwargs) -> None:
# Create a run dir, gather files related to the run, copy files to the run dir, and launch the run in appropriate place.
# create_newdir: enforces the creation of a new run directory
# resume: resumes a prior experiment using its existing run directory
# load_config: in case resume = True, load prior experiment config instead of using the current command-line parameters
submit_config = copy.deepcopy(submit_config)
submit_target = submit_config.submit_target
farm = None
if submit_target == SubmitTarget.LOCAL:
farm = internal.local.Target()
assert farm is not None # unknown target
# Disallow submitting jobs with zero num_gpus
if (submit_config.num_gpus is None) or (submit_config.num_gpus == 0):
raise RuntimeError("submit_config.num_gpus must be set to a non-zero value")
if submit_config.user_name is None:
submit_config.user_name = get_user_name()
submit_config.run_func_name = run_func_name
submit_config.run_func_kwargs = run_func_kwargs
#--------------------------------------------------------------------
# Prepare submission by populating the run dir
#--------------------------------------------------------------------
host_run_dir = _create_run_dir_local(submit_config, resume, create_new = create_newdir)
submit_config.task_name = "{}-{:05d}-{}".format(submit_config.user_name, submit_config.run_id, submit_config.run_desc)
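    # Illustrative example (hedged): with user_name "alice", run_id 3 and
    # run_desc "train-ffhq", task_name becomes "alice-00003-train-ffhq".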
docker_valid_name_regex = "^[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
if not re.match(docker_valid_name_regex, submit_config.task_name):
raise RuntimeError("Invalid task name. Probable reason: unacceptable characters in your submit_config.run_desc. Task name must be accepted by the following regex: " + docker_valid_name_regex + ", got " + submit_config.task_name)
# Farm specific preparations for a submit
farm.finalize_submit_config(submit_config, host_run_dir)
# In case of resumption, load_config = True to load the prior submit_config file from the directory
    # (so as to maintain the original configuration of the experiment rather than
    # the newly provided command-line arguments).
if load_config:
config_file = os.path.join(host_run_dir, "submit_config.pkl")
if os.path.exists(config_file):
old_submit_config = submit_config
submit_config = load_pkl(config_file)
submit_config["run_id"] = old_submit_config["run_id"]
submit_config["run_name"] = old_submit_config["run_name"]
if "resume_pkl" in old_submit_config["run_func_kwargs"]:
submit_config["run_func_kwargs"]["resume_pkl"] = old_submit_config["run_func_kwargs"]["resume_pkl"]
submit_config["run_func_kwargs"]["resume_kimg"] = old_submit_config["run_func_kwargs"]["resume_kimg"]
_populate_run_dir(submit_config, host_run_dir)
return farm.submit(submit_config, host_run_dir)
|
h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py | ahmedengu/h2o-3 | 6,098 | 3763 | <filename>h2o-docs/src/booklets/v2_2015/source/Python_Vignette_code_examples/python_combine_frames_append_one_as_columns.py
df8.cbind(df9)
# A B C D A0 B0 C0 D0
# ----- ------ ------ ------ ------ ----- ----- -----
# -0.09 0.944 0.160 0.271 -0.351 1.66 -2.32 -0.86
# -0.95 0.669 0.664 1.535 -0.633 -1.78 0.32 1.27
# 0.17 0.657 0.970 -0.419 -1.413 -0.51 0.64 -1.25
# 0.58 -0.516 -1.598 -1.346 0.711 1.09 0.05 0.63
# 1.04 -0.281 -0.411 0.959 -0.009 -0.47 0.41 -0.52
# 0.49 0.170 0.124 -0.170 -0.722 -0.79 -0.91 -2.09
# 1.42 -0.409 -0.525 2.155 -0.841 -0.19 0.13 0.63
# 0.94 1.192 -1.075 0.017 0.167 0.54 0.52 1.42
# -0.53 0.777 -1.090 -2.237 -0.693 0.24 -0.56 1.45
# 0.34 -0.456 -1.220 -0.456 -0.315 1.10 1.38 -0.05
#
# [100 rows x 8 columns] |
hatsploit/core/db/db.py | EntySec/HatSploit | 139 | 3842 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
from hatsploit.core.cli.badges import Badges
from hatsploit.lib.config import Config
from hatsploit.lib.storage import LocalStorage
class DB:
badges = Badges()
config = Config()
local_storage = LocalStorage()
def disconnect_payload_database(self, name):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.local_storage.delete_element("connected_payload_databases", name)
self.local_storage.delete_element("payloads", name)
return
self.badges.print_error("No such payload database connected!")
def disconnect_module_database(self, name):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.local_storage.delete_element("connected_module_databases", name)
self.local_storage.delete_element("modules", name)
return
self.badges.print_error("No such module database connected!")
def disconnect_plugin_database(self, name):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.local_storage.delete_element("connected_plugin_databases", name)
self.local_storage.delete_element("plugins", name)
return
self.badges.print_error("No such plugin database connected!")
def connect_payload_database(self, name, path):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.badges.print_error("Payload database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a payload database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect payload database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "payloads":
self.badges.print_error("Not a payload database!")
return
del database['__database__']
payloads = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_payload_databases"):
self.local_storage.set("connected_payload_databases", {})
self.local_storage.update("connected_payload_databases", data)
if self.local_storage.get("payloads"):
self.local_storage.update("payloads", payloads)
else:
self.local_storage.set("payloads", payloads)
def connect_module_database(self, name, path):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.badges.print_error("Module database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a module database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect module database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "modules":
self.badges.print_error("Not a module database!")
return
del database['__database__']
modules = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_module_databases"):
self.local_storage.set("connected_module_databases", {})
self.local_storage.update("connected_module_databases", data)
if self.local_storage.get("modules"):
self.local_storage.update("modules", modules)
else:
self.local_storage.set("modules", modules)
def connect_plugin_database(self, name, path):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.badges.print_error("Plugin database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect plugin database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "plugins":
self.badges.print_error("Not a plugin database!")
return
del database['__database__']
plugins = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_plugin_databases"):
self.local_storage.set("connected_plugin_databases", {})
self.local_storage.update("connected_plugin_databases", data)
if self.local_storage.get("plugins"):
self.local_storage.update("plugins", plugins)
else:
self.local_storage.set("plugins", plugins)
|
etl/parsers/etw/Microsoft_Windows_IPxlatCfg.py | IMULMUL/etl-parser | 104 | 3847 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-IPxlatCfg
GUID : 3e5ac668-af52-4c15-b99b-a3e7a6616ebd
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1001, version=0)
class Microsoft_Windows_IPxlatCfg_1001_0(Etw):
pattern = Struct(
"ErrorString" / CString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1002, version=0)
class Microsoft_Windows_IPxlatCfg_1002_0(Etw):
pattern = Struct(
"ErrorString" / CString,
"ErrorCode" / Int32ul,
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1003, version=0)
class Microsoft_Windows_IPxlatCfg_1003_0(Etw):
pattern = Struct(
"InfoString" / CString
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1005, version=0)
class Microsoft_Windows_IPxlatCfg_1005_0(Etw):
pattern = Struct(
"IPv4Address" / Int32ul,
"IPv4Prefix" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1006, version=0)
class Microsoft_Windows_IPxlatCfg_1006_0(Etw):
pattern = Struct(
"InfoString" / CString,
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1007, version=0)
class Microsoft_Windows_IPxlatCfg_1007_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"PrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1008, version=0)
class Microsoft_Windows_IPxlatCfg_1008_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"IPv4Address" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1009, version=0)
class Microsoft_Windows_IPxlatCfg_1009_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1010, version=0)
class Microsoft_Windows_IPxlatCfg_1010_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1011, version=0)
class Microsoft_Windows_IPxlatCfg_1011_0(Etw):
pattern = Struct(
"InfoString" / CString,
"MTU" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1101, version=0)
class Microsoft_Windows_IPxlatCfg_1101_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"Metric" / Int32ul,
"RemotePrefixLength" / Int32ul,
"LocalPrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1102, version=0)
class Microsoft_Windows_IPxlatCfg_1102_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"Metric" / Int32ul,
"RemotePrefixLength" / Int32ul,
"LocalPrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1103, version=0)
class Microsoft_Windows_IPxlatCfg_1103_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"PrefixLength" / Int32ul
)
|
semantic-python/test/fixtures/4-01-lambda-literals.py | Temurson/semantic | 8,844 | 3849 | <reponame>Temurson/semantic<gh_stars>1000+
# CHECK-TREE: { const <- \x -> \y -> x; y <- const #true #true; z <- const #false #false; #record { const: const, y : y, z: z, }}
const = lambda x, y: x
y = const(True, True)
z = const(False, False)
|
driver/python/setup.py | wbaweto/QConf | 2,056 | 3857 | <filename>driver/python/setup.py
from distutils.core import setup, Extension
setup(name = 'qconf_py', version = '1.2.2', ext_modules = [Extension('qconf_py', ['lib/python_qconf.cc'],
include_dirs=['/usr/local/include/qconf'],
extra_objects=['/usr/local/qconf/lib/libqconf.a']
)])
|
demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py | Natureshadow/OpenGoPro | 210 | 3859 | <filename>demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py<gh_stars>100-1000
# log_battery.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:45 PM
"""Example to continuously read the battery (with no Wifi connection)"""
import csv
import time
import logging
import argparse
import threading
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass
from typing import Optional, Tuple, Literal, List
from rich.console import Console
from open_gopro import GoPro
from open_gopro.constants import StatusId
from open_gopro.util import setup_logging, set_logging_level
logger = logging.getLogger(__name__)
console = Console() # rich consoler printer
BarsType = Literal[0, 1, 2, 3]
@dataclass
class Sample:
"""Simple class to store battery samples"""
index: int
percentage: int
bars: BarsType
def __post_init__(self) -> None:
self.time = datetime.now()
def __str__(self) -> str: # pylint: disable=missing-return-doc
return f"Index {self.index} @ time {self.time.strftime('%H:%M:%S')} --> bars: {self.bars}, percentage: {self.percentage}"
SAMPLE_INDEX = 0
SAMPLES: List[Sample] = []
def dump_results_as_csv(location: Path) -> None:
"""Write all of the samples to a csv file
Args:
location (Path): File to write to
"""
console.print(f"Dumping results as CSV to {location}")
with open(location, mode="w") as f:
w = csv.writer(f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
w.writerow(["index", "time", "percentage", "bars"])
initial_time = SAMPLES[0].time
for s in SAMPLES:
w.writerow([s.index, (s.time - initial_time).seconds, s.percentage, s.bars])
def process_battery_notifications(gopro: GoPro, initial_bars: BarsType, initial_percentage: int) -> None:
"""Separate thread to continuously check for and store battery notifications.
If the CLI parameter was set to poll, this isn't used.
Args:
gopro (GoPro): instance to get updates from
initial_bars (BarsType): Initial bars level when notifications were enabled
initial_percentage (int): Initial percentage when notifications were enabled
"""
last_percentage = initial_percentage
last_bars = initial_bars
while True:
# Block until we receive an update
notification = gopro.get_update()
# Update data points if they have changed
last_percentage = (
notification.data[StatusId.INT_BATT_PER]
if StatusId.INT_BATT_PER in notification.data
else last_percentage
)
last_bars = (
notification.data[StatusId.BATT_LEVEL] if StatusId.BATT_LEVEL in notification.data else last_bars
)
# Append and print sample
global SAMPLE_INDEX
SAMPLES.append(Sample(index=SAMPLE_INDEX, percentage=last_percentage, bars=last_bars))
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
def main() -> int:
"""Main program functionality
Returns:
int: program return code
"""
identifier, log_location, poll = parse_arguments()
global logger
logger = setup_logging(logger, log_location)
global SAMPLE_INDEX
gopro: Optional[GoPro] = None
return_code = 0
try:
with GoPro(identifier, enable_wifi=False) as gopro:
set_logging_level(logger, logging.ERROR)
            # Setup notifications if we are not polling
if poll is None:
console.print("Configuring battery notifications...")
# Enable notifications of the relevant battery statuses. Also store initial values.
bars = gopro.ble_status.batt_level.register_value_update().flatten
percentage = gopro.ble_status.int_batt_per.register_value_update().flatten
# Start a thread to handle asynchronous battery level notifications
threading.Thread(
target=process_battery_notifications, args=(gopro, bars, percentage), daemon=True
).start()
with console.status("[bold green]Receiving battery notifications until it dies..."):
# Sleep forever, allowing notification handler thread to deal with battery level notifications
while True:
time.sleep(1)
# Otherwise, poll
else:
with console.status("[bold green]Polling the battery until it dies..."):
while True:
SAMPLES.append(
Sample(
index=SAMPLE_INDEX,
percentage=gopro.ble_status.int_batt_per.get_value().flatten,
bars=gopro.ble_status.batt_level.get_value().flatten,
)
)
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
time.sleep(poll)
except Exception as e: # pylint: disable=broad-except
logger.error(repr(e))
return_code = 1
except KeyboardInterrupt:
logger.warning("Received keyboard interrupt. Shutting down...")
finally:
if len(SAMPLES) > 0:
csv_location = Path(log_location.parent) / "battery_results.csv"
dump_results_as_csv(csv_location)
if gopro is not None:
gopro.close()
console.print("Exiting...")
return return_code # pylint: disable=lost-exception
def parse_arguments() -> Tuple[str, Path, Optional[int]]:
"""Parse command line arguments
Returns:
        Tuple[str, Path, Optional[int]]: (identifier, path to save log, polling interval or None)
"""
parser = argparse.ArgumentParser(
description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications)."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \
If not used, first discovered GoPro will be connected to",
default=None,
)
parser.add_argument(
"-l",
"--log",
type=Path,
help="Location to store detailed log",
default="log_battery.log",
)
parser.add_argument(
"-p",
"--poll",
type=int,
help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications.",
default=None,
)
args = parser.parse_args()
return args.identifier, args.log, args.poll
if __name__ == "__main__":
main()
|
neo/io/exampleio.py | Mario-Kart-Felix/python-neo | 199 | 3866 | <gh_stars>100-1000
"""
neo.io has been split into a 2-level API:
 * neo.io: this API gives neo objects
 * neo.rawio: this API gives raw data as they are in the files.
Developers are encouraged to use neo.rawio.
Once that is done, the corresponding neo.io class is generated
automagically with the following kind of code.
Author: sgarcia
"""
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.examplerawio import ExampleRawIO
class ExampleIO(ExampleRawIO, BaseFromRaw):
name = 'example IO'
description = "Fake IO"
    # This is an important choice when there are several channels.
    # 'split-all' : one AnalogSignal per channel
    # 'group-by-same-units' : one 2D AnalogSignal for each group of channels with the same units
_prefered_signal_group_mode = 'group-by-same-units'
def __init__(self, filename=''):
ExampleRawIO.__init__(self, filename=filename)
BaseFromRaw.__init__(self, filename)
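# Illustrative usage (hedged sketch): ExampleRawIO generates fake data, so any
# filename is accepted, and read_block() is assumed to be provided by BaseFromRaw:
#
#   io = ExampleIO('fake.fake')
#   block = io.read_block(lazy=False)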
|
scrapyproject/migrations/0003_auto_20170209_1025.py | sap9433/Distributed-Multi-User-Scrapy-System-with-a-Web-UI | 108 | 3867 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapyproject', '0002_auto_20170208_1738'),
]
operations = [
migrations.AlterField(
model_name='project',
name='link_generator',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='project',
name='scraper_function',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='project',
name='settings',
field=models.TextField(blank=True),
),
]
|
covfefe/covfefe.py | fixator10/Trusty-cogs | 148 | 3872 | import re
import discord
from redbot.core import commands
class Covfefe(commands.Cog):
"""
Convert almost any word into covfefe
"""
def __init__(self, bot):
self.bot = bot
async def covfefe(self, x, k="aeiouy])"):
"""
https://codegolf.stackexchange.com/a/123697
"""
try:
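            # With the default k = "aeiouy])", the f-string below expands to the
            # pattern (.*?[aeiouy])([^aeiouy]).*?([aeiouy]): a shortest prefix
            # ending in a vowel, the following consonant, and the next vowel.
            # For example, "coverage" gives b="co", c="v", v="e", and the return
            # line builds "co" + "v" + "fe"*2 == "covfefe".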
b, c, v = re.findall(f"(.*?[{k}([^{k}.*?([{k}", x)[0]
return b + c + (("bcdfgkpstvz" + c)["pgtvkgbzdfs".find(c)] + v) * 2
except IndexError:
return None
async def red_delete_data_for_user(self, **kwargs):
"""
Nothing to delete
"""
return
@commands.command()
async def covefy(self, ctx, msg):
"""Convert almost any word into covfefe"""
newword = await self.covfefe(msg)
if newword is not None:
await ctx.send(newword)
else:
await ctx.send("I cannot covfefeify that word")
|
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py | yiwc/robotics-world | 681 | 3886 | <gh_stars>100-1000
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerDialTurnEnvV2(SawyerXYZEnv):
TARGET_RADIUS = 0.07
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.7, 0.0)
obj_high = (0.1, 0.8, 0.0)
goal_low = (-0.1, 0.73, 0.0299)
goal_high = (0.1, 0.83, 0.0301)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.7, 0.0]),
'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
}
self.goal = np.array([0., 0.73, 0.08])
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_dial.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward,
tcp_to_obj,
_,
target_to_obj,
object_grasped,
in_place) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= self.TARGET_RADIUS),
'near_object': float(tcp_to_obj <= 0.01),
'grasp_success': 1.,
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
def _get_pos_objects(self):
dial_center = self.get_body_com('dial').copy()
dial_angle_rad = self.data.get_joint_qpos('knob_Joint_1')
offset = np.array([
np.sin(dial_angle_rad),
-np.cos(dial_angle_rad),
0
])
dial_radius = 0.05
offset *= dial_radius
return dial_center + offset
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('dial')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.prev_obs = self._get_curr_obs_combined_no_goal()
if self.random_init:
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos[:3]
final_pos = goal_pos.copy() + np.array([0, 0.03, 0.03])
self._target_pos = final_pos
self.sim.model.body_pos[self.model.body_name2id('dial')] = self.obj_init_pos
self.dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
return self._get_obs()
def compute_reward(self, action, obs):
obj = self._get_pos_objects()
dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = (obj - target)
target_to_obj = np.linalg.norm(target_to_obj)
target_to_obj_init = (self.dial_push_position - target)
target_to_obj_init = np.linalg.norm(target_to_obj_init)
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self.TARGET_RADIUS),
margin=abs(target_to_obj_init - self.TARGET_RADIUS),
sigmoid='long_tail',
)
dial_reach_radius = 0.005
tcp_to_obj = np.linalg.norm(dial_push_position - tcp)
tcp_to_obj_init = np.linalg.norm(self.dial_push_position - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, dial_reach_radius),
margin=abs(tcp_to_obj_init-dial_reach_radius),
sigmoid='gaussian',
)
gripper_closed = min(max(0, action[-1]), 1)
reach = reward_utils.hamacher_product(reach, gripper_closed)
tcp_opened = 0
object_grasped = reach
reward = 10 * reward_utils.hamacher_product(reach, in_place)
return (reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
object_grasped,
in_place)
|
tools/mo/openvino/tools/mo/ops/detection_output_onnx.py | ryanloney/openvino-1 | 1,127 | 3930 | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import dynamic_dimension_value, shape_array, set_input_shapes
from openvino.tools.mo.ops.op import Op
class ExperimentalDetectronDetectionOutput(Op):
op = 'ExperimentalDetectronDetectionOutput'
enabled = True
def __init__(self, graph, attrs):
mandatory_props = dict(
type=self.op,
op=self.op,
version='opset6',
infer=self.infer,
reverse_infer=self.reverse_infer,
type_infer=self.type_infer,
in_ports_count=4,
out_ports_count=3,
)
super().__init__(graph, mandatory_props, attrs)
def backend_attrs(self):
return [
('class_agnostic_box_regression', lambda node: str(bool(node['class_agnostic_box_regression'])).lower()),
'max_detections_per_image',
'nms_threshold',
'num_classes',
'post_nms_count',
'score_threshold',
'max_delta_log_wh',
('deltas_weights', lambda node: ','.join(map(str, node['deltas_weights'])))]
@staticmethod
def infer(node):
rois_num = node.max_detections_per_image
# boxes
node.out_port(0).data.set_shape([rois_num, 4])
# classes, scores, batch indices
# We use range(1, 1 + max(node.out_ports().keys())) instead of range(1, 3), because there are incorrectly
# generated models where ExperimentalDetectronDetectionOutput has 4 outputs.
for port_ind in range(1, 1 + max(node.out_ports().keys())):
if not node.out_port(port_ind).disconnected():
node.out_port(port_ind).data.set_shape([rois_num])
@staticmethod
def type_infer(node):
in_data_type = node.in_port(0).get_data_type()
node.out_port(0).set_data_type(in_data_type)
node.out_port(1).set_data_type(np.int32) # the second output contains class indices
node.out_port(2).set_data_type(in_data_type)
if node.is_out_port_connected(3):
node.out_port(3).set_data_type(np.int32) # the fourth output contains batch indices
@staticmethod
def reverse_infer(node):
set_input_shapes(node,
shape_array([dynamic_dimension_value, 4]),
shape_array([dynamic_dimension_value, node['num_classes'] * 4]),
shape_array([dynamic_dimension_value, node['num_classes']]),
shape_array([1, 3]))
|
volksdep/converters/__init__.py | repoww/volksdep | 271 | 3943 | from .torch2onnx import torch2onnx
from .onnx2trt import onnx2trt
from .torch2trt import torch2trt
from .base import load, save
|
MuonAnalysis/MomentumScaleCalibration/test/LikelihoodPdfDBReader_cfg.py | ckamtsikis/cmssw | 852 | 3969 | import FWCore.ParameterSet.Config as cms
process = cms.Process("LIKELIHOODPDFDBREADER")
# process.load("MuonAnalysis.MomentumScaleCalibration.local_CSA08_Y_cff")
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
process.load("Geometry.CommonTopologies.globalTrackingGeometry_cfi")
process.load("RecoMuon.DetLayers.muonDetLayerGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("RecoMuon.TrackingTools.MuonServiceProxy_cff")
# process.source = cms.Source("PoolSource",
# fileNames = cms.untracked.vstring()
# )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.poolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string('sqlite_file:dummy2.db'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('MuScleFitLikelihoodPdfRcd'),
tag = cms.string('MuScleFitLikelihoodPdf_2_1_12')
))
)
process.LikelihoodPdfDBReaderModule = cms.EDAnalyzer(
"LikelihoodPdfDBReader"
)
process.p1 = cms.Path(process.LikelihoodPdfDBReaderModule)
|
dataloader/viperlist_train.py | urasakikeisuke/rigidmask | 138 | 3976 | <reponame>urasakikeisuke/rigidmask<gh_stars>100-1000
import torch.utils.data as data
from PIL import Image
import os
import os.path
import numpy as np
import pdb
import glob
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def dataloader(filepath):
left_fold = 'image_2/'
train = glob.glob(filepath+left_fold+'/0*.jpg')
train = sorted(train)
l0_train = []
l1_train = []
flow_train = []
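  # Illustrative pairing (hedged): a frame such as image_2/001_00010.jpg is
  # matched with its successor image_2/001_00011.jpg and with the flow map
  # flow_occ/001_00010.png; pairs are kept only if both files exist and the
  # frame name does not contain '01000'.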
for img in train:
img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) ))
flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ')
if (img1 in train and len(glob.glob(flowp))>0 and ('01000' not in img)):
l0_train.append(img)
l1_train.append(img1)
flow_train.append(flowp)
return l0_train, l1_train, flow_train
|
bpython/curtsiesfrontend/parse.py | dtrodrigues/bpython | 2,168 | 4001 | import re
from curtsies.formatstring import fmtstr, FmtStr
from curtsies.termformatconstants import (
FG_COLORS,
BG_COLORS,
colors as CURTSIES_COLORS,
)
from functools import partial
from ..lazyre import LazyReCompile
COLORS = CURTSIES_COLORS + ("default",)
CNAMES = dict(zip("krgybmcwd", COLORS))
# hack for finding the "inverse"
INVERSE_COLORS = {
CURTSIES_COLORS[idx]: CURTSIES_COLORS[
(idx + (len(CURTSIES_COLORS) // 2)) % len(CURTSIES_COLORS)
]
for idx in range(len(CURTSIES_COLORS))
}
INVERSE_COLORS["default"] = INVERSE_COLORS[CURTSIES_COLORS[0]]
def func_for_letter(letter_color_code: str, default: str = "k"):
"""Returns FmtStr constructor for a bpython-style color code"""
if letter_color_code == "d":
letter_color_code = default
elif letter_color_code == "D":
letter_color_code = default.upper()
return partial(
fmtstr,
fg=CNAMES[letter_color_code.lower()],
bold=letter_color_code.isupper(),
)
def color_for_letter(letter_color_code: str, default: str = "k"):
if letter_color_code == "d":
letter_color_code = default
return CNAMES[letter_color_code.lower()]
def parse(s):
"""Returns a FmtStr object from a bpython-formatted colored string"""
rest = s
stuff = []
while True:
if not rest:
break
start, rest = peel_off_string(rest)
stuff.append(start)
return (
sum((fs_from_match(d) for d in stuff[1:]), fs_from_match(stuff[0]))
if len(stuff) > 0
else FmtStr()
)
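# Illustrative input format (hedged): each colored chunk looks like
# "\x01<fg><bg?>\x02?\x03<text>\x04", so for example
#   parse("\x01y\x03yellow text\x04")
# should yield a FmtStr rendering "yellow text" with a yellow foreground.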
def fs_from_match(d):
atts = {}
if d["fg"]:
# this isn't according to spec as I understand it
if d["fg"].isupper():
d["bold"] = True
# TODO figure out why boldness isn't based on presence of \x02
color = CNAMES[d["fg"].lower()]
if color != "default":
atts["fg"] = FG_COLORS[color]
if d["bg"]:
if d["bg"] == "I":
# hack for finding the "inverse"
color = INVERSE_COLORS[color]
else:
color = CNAMES[d["bg"].lower()]
if color != "default":
atts["bg"] = BG_COLORS[color]
if d["bold"]:
atts["bold"] = True
return fmtstr(d["string"], **atts)
peel_off_string_re = LazyReCompile(
r"""(?P<colormarker>\x01
(?P<fg>[krgybmcwdKRGYBMCWD]?)
(?P<bg>[krgybmcwdKRGYBMCWDI]?)?)
(?P<bold>\x02?)
\x03
(?P<string>[^\x04]*)
\x04
(?P<rest>.*)
""",
re.VERBOSE | re.DOTALL,
)
def peel_off_string(s):
m = peel_off_string_re.match(s)
assert m, repr(s)
d = m.groupdict()
rest = d["rest"]
del d["rest"]
return d, rest
|
ktrain/graph/learner.py | husmen/ktrain | 1,013 | 4003 | from ..imports import *
from .. import utils as U
from ..core import GenLearner
class NodeClassLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for node classification
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
    train_data (Iterator): an Iterator instance for the training set
    val_data (Iterator): an Iterator instance for the validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
class LinkPredLearner(GenLearner):
"""
```
Main class used to tune and train Keras models for link prediction
Main parameters are:
model (Model): A compiled instance of keras.engine.training.Model
    train_data (Iterator): an Iterator instance for the training set
    val_data (Iterator): an Iterator instance for the validation set
```
"""
def __init__(self, model, train_data=None, val_data=None,
batch_size=U.DEFAULT_BS, eval_batch_size=U.DEFAULT_BS,
workers=1, use_multiprocessing=False):
super().__init__(model, train_data=train_data, val_data=val_data,
batch_size=batch_size, eval_batch_size=eval_batch_size,
workers=workers, use_multiprocessing=use_multiprocessing)
return
def view_top_losses(self, n=4, preproc=None, val_data=None):
"""
```
Views observations with top losses in validation set.
Typically over-ridden by Learner subclasses.
Args:
n(int or tuple): a range to select in form of int or tuple
e.g., n=8 is treated as n=(0,8)
preproc (Preprocessor): A TextPreprocessor or ImagePreprocessor.
For some data like text data, a preprocessor
is required to undo the pre-processing
to correctly view raw data.
val_data: optional val_data to use instead of self.val_data
Returns:
list of n tuples where first element is either
filepath or id of validation example and second element
is loss.
```
"""
val = self._check_val(val_data)
# get top losses and associated data
tups = self.top_losses(n=n, val_data=val, preproc=preproc)
# get multilabel status and class names
classes = preproc.get_classes() if preproc is not None else None
# iterate through losses
for tup in tups:
# get data
idx = tup[0]
loss = tup[1]
truth = tup[2]
pred = tup[3]
print('----------')
print("id:%s | loss:%s | true:%s | pred:%s)\n" % (idx, round(loss,2), truth, pred))
#print(obs)
return
def layer_output(self, layer_id, example_id=0, batch_id=0, use_val=False):
"""
```
Prints output of layer with index <layer_id> to help debug models.
Uses first example (example_id=0) from training set, by default.
```
"""
raise Exception('currently_unsupported: layer_output method is not yet supported for ' +
'graph neural networks in ktrain')
|
ichnaea/data/export.py | rajreet/ichnaea | 348 | 4038 | from collections import defaultdict
import json
import re
import time
from urllib.parse import urlparse
import uuid
import boto3
import boto3.exceptions
import botocore.exceptions
import markus
import redis.exceptions
import requests
import requests.exceptions
from sqlalchemy import select
import sqlalchemy.exc
from ichnaea.data import _map_content_enabled
from ichnaea.models import (
ApiKey,
BlueObservation,
BlueReport,
BlueShard,
CellObservation,
CellReport,
CellShard,
DataMap,
ExportConfig,
Report,
WifiObservation,
WifiReport,
WifiShard,
)
from ichnaea.models.content import encode_datamap_grid
from ichnaea import util
WHITESPACE = re.compile(r"\s", flags=re.UNICODE)
METRICS = markus.get_metrics()
class IncomingQueue(object):
"""
The incoming queue contains the data collected in the web application. It
is the single entrypoint from which all other data pipelines get their
data.
It distributes the data into the configured export queues, checks those
queues and if they contain enough or old enough data schedules an async
export task to process the data in each queue.
"""
def __init__(self, task):
self.task = task
def __call__(self, export_task):
redis_client = self.task.redis_client
data_queue = self.task.app.data_queues["update_incoming"]
data = data_queue.dequeue()
grouped = defaultdict(list)
for item in data:
grouped[(item["api_key"], item.get("source", "gnss"))].append(
{"api_key": item["api_key"], "report": item["report"]}
)
with self.task.db_session(commit=False) as session:
export_configs = ExportConfig.all(session)
with self.task.redis_pipeline() as pipe:
for (api_key, source), items in grouped.items():
for config in export_configs:
if config.allowed(api_key, source):
queue_key = config.queue_key(api_key, source)
queue = config.queue(queue_key, redis_client)
queue.enqueue(items, pipe=pipe)
for config in export_configs:
# Check all queues if they now contain enough data or
# old enough data to be ready for processing.
for queue_key in config.partitions(redis_client):
queue = config.queue(queue_key, redis_client)
if queue.ready():
export_task.delay(config.name, queue_key)
if data_queue.ready():
self.task.apply_countdown()
class ReportExporter(object):
_retriable = (IOError,)
_retries = 3
_retry_wait = 1.0
def __init__(self, task, config, queue_key):
self.task = task
self.config = config
self.queue_key = queue_key
self.queue = config.queue(queue_key, task.redis_client)
self.stats_tags = ["key:" + self.config.name]
@staticmethod
def export(task, name, queue_key):
with task.db_session(commit=False) as session:
config = ExportConfig.get(session, name)
exporter_types = {
"dummy": DummyExporter,
"geosubmit": GeosubmitExporter,
"internal": InternalExporter,
"s3": S3Exporter,
}
exporter_type = exporter_types.get(config.schema)
if exporter_type is not None:
exporter_type(task, config, queue_key)()
def __call__(self):
queue_items = self.queue.dequeue()
if not queue_items:
return
success = False
for i in range(self._retries):
try:
with METRICS.timer("data.export.upload.timing", tags=self.stats_tags):
self.send(queue_items)
success = True
except self._retriable:
success = False
time.sleep(self._retry_wait * (i ** 2 + 1))
if success:
METRICS.incr("data.export.batch", tags=self.stats_tags)
break
if success and self.queue.ready():
self.task.apply_countdown(args=[self.config.name, self.queue_key])
def send(self, queue_items):
raise NotImplementedError()
class DummyExporter(ReportExporter):
def send(self, queue_items):
pass
class GeosubmitExporter(ReportExporter):
_retriable = (IOError, requests.exceptions.RequestException)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
headers = {
"Content-Encoding": "gzip",
"Content-Type": "application/json",
"User-Agent": "ichnaea",
}
response = requests.post(
self.config.url,
data=util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=5
),
headers=headers,
timeout=60.0,
)
# log upload_status and trigger exception for bad responses
# this causes the task to be re-tried
METRICS.incr(
"data.export.upload",
tags=self.stats_tags + ["status:%s" % response.status_code],
)
response.raise_for_status()
class S3Exporter(ReportExporter):
_retriable = (
IOError,
boto3.exceptions.Boto3Error,
botocore.exceptions.BotoCoreError,
)
def send(self, queue_items):
# ignore metadata
reports = [item["report"] for item in queue_items]
_, bucketname, path = urlparse(self.config.url)[:3]
# s3 key names start without a leading slash
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
year, month, day = util.utcnow().timetuple()[:3]
# strip away queue prefix again
parts = self.queue_key.split(":")
source = parts[1]
api_key = parts[2]
obj_name = path.format(
source=source, api_key=api_key, year=year, month=month, day=day
)
obj_name += uuid.uuid1().hex + ".json.gz"
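        # Illustrative result (hedged; the exact layout depends on the configured
        # path template), e.g. exports/gnss/<api_key>/2021/6/1/<uuid-hex>.json.gz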
try:
data = util.encode_gzip(
json.dumps({"items": reports}).encode(), compresslevel=7
)
s3 = boto3.resource("s3")
bucket = s3.Bucket(bucketname)
obj = bucket.Object(obj_name)
obj.put(Body=data, ContentEncoding="gzip", ContentType="application/json")
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:success"]
)
except Exception:
METRICS.incr(
"data.export.upload", tags=self.stats_tags + ["status:failure"]
)
raise
class InternalTransform(object):
"""
This maps the geosubmit v2 schema used in view code and external
transfers (backup, forward to partners) to the internal submit v1
schema used in our own database models.
"""
# *_id maps a source section id to a target section id
# *_map maps fields inside the section from source to target id
# if the names are equal, a simple string can be specified instead
# of a two-tuple
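    # Illustrative transform (hedged): a geosubmit v2 item such as
    #   {"position": {"latitude": 51.0, "longitude": 13.0},
    #    "wifiAccessPoints": [{"macAddress": "ab:cd:ef:01:02:03",
    #                          "signalStrength": -60}]}
    # is mapped to the internal schema
    #   {"lat": 51.0, "lon": 13.0,
    #    "wifi": [{"mac": "ab:cd:ef:01:02:03", "signal": -60}]}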
position_id = ("position", None)
position_map = [
("latitude", "lat"),
("longitude", "lon"),
"accuracy",
"altitude",
("altitudeAccuracy", "altitude_accuracy"),
"heading",
"pressure",
"speed",
"source",
]
blue_id = ("bluetoothBeacons", "blue")
blue_map = [("macAddress", "mac"), "age", ("signalStrength", "signal")]
cell_id = ("cellTowers", "cell")
cell_map = [
("radioType", "radio"),
("mobileCountryCode", "mcc"),
("mobileNetworkCode", "mnc"),
("locationAreaCode", "lac"),
("cellId", "cid"),
"age",
"asu",
("primaryScramblingCode", "psc"),
"serving",
("signalStrength", "signal"),
("timingAdvance", "ta"),
]
wifi_id = ("wifiAccessPoints", "wifi")
wifi_map = [
("macAddress", "mac"),
"age",
"channel",
"frequency",
("radioType", "radio"),
("signalToNoiseRatio", "snr"),
("signalStrength", "signal"),
]
def _map_dict(self, item_source, field_map):
value = {}
for spec in field_map:
if isinstance(spec, tuple):
source, target = spec
else:
source = spec
target = spec
source_value = item_source.get(source)
if source_value is not None:
value[target] = source_value
return value
def _parse_dict(self, item, report, key_map, field_map):
value = {}
item_source = item.get(key_map[0])
if item_source:
value = self._map_dict(item_source, field_map)
if value:
if key_map[1] is None:
report.update(value)
else:
report[key_map[1]] = value
return value
def _parse_list(self, item, report, key_map, field_map):
values = []
for value_item in item.get(key_map[0], ()):
value = self._map_dict(value_item, field_map)
if value:
values.append(value)
if values:
report[key_map[1]] = values
return values
def __call__(self, item):
report = {}
self._parse_dict(item, report, self.position_id, self.position_map)
blues = self._parse_list(item, report, self.blue_id, self.blue_map)
cells = self._parse_list(item, report, self.cell_id, self.cell_map)
wifis = self._parse_list(item, report, self.wifi_id, self.wifi_map)
position = item.get("position") or {}
gps_age = position.get("age", 0)
timestamp = item.get("timestamp")
if timestamp:
# turn timestamp into GPS timestamp
report["timestamp"] = timestamp - gps_age
if gps_age:
# Normalize age fields to be relative to GPS time
for type_ in ("blue", "cell", "wifi"):
for record in report.get(type_, ()):
record["age"] = record.get("age", 0) - gps_age
if blues or cells or wifis:
return report
return {}
class InternalExporter(ReportExporter):
_retriable = (IOError, redis.exceptions.RedisError, sqlalchemy.exc.InternalError)
transform = InternalTransform()
def send(self, queue_items):
api_keys = set()
api_keys_known = set()
metrics = {}
items = []
for item in queue_items:
# preprocess items and extract set of API keys
item["report"] = self.transform(item["report"])
if item["report"]:
items.append(item)
api_keys.add(item["api_key"])
for api_key in api_keys:
metrics[api_key] = {}
for type_ in ("report", "blue", "cell", "wifi"):
for action in ("drop", "upload"):
metrics[api_key]["%s_%s" % (type_, action)] = 0
with self.task.db_session(commit=False) as session:
# limit database session to get API keys
keys = [key for key in api_keys if key]
if keys:
columns = ApiKey.__table__.c
rows = session.execute(
select([columns.valid_key]).where(columns.valid_key.in_(keys))
).fetchall()
for row in rows:
api_keys_known.add(row.valid_key)
positions = []
observations = {"blue": [], "cell": [], "wifi": []}
for item in items:
api_key = item["api_key"]
report = item["report"]
obs, malformed_obs = self.process_report(report)
any_data = False
for name in ("blue", "cell", "wifi"):
if obs.get(name):
observations[name].extend(obs[name])
metrics[api_key][name + "_upload"] += len(obs[name])
any_data = True
metrics[api_key][name + "_drop"] += malformed_obs.get(name, 0)
metrics[api_key]["report_upload"] += 1
if any_data:
positions.append((report["lat"], report["lon"]))
else:
metrics[api_key]["report_drop"] += 1
with self.task.redis_pipeline() as pipe:
self.queue_observations(pipe, observations)
if _map_content_enabled and positions:
self.process_datamap(pipe, positions)
self.emit_metrics(api_keys_known, metrics)
def queue_observations(self, pipe, observations):
for datatype, shard_model, shard_key, queue_prefix in (
("blue", BlueShard, "mac", "update_blue_"),
("cell", CellShard, "cellid", "update_cell_"),
("wifi", WifiShard, "mac", "update_wifi_"),
):
queued_obs = defaultdict(list)
for obs in observations[datatype]:
# group by sharded queue
shard_id = shard_model.shard_id(getattr(obs, shard_key))
queue_id = queue_prefix + shard_id
queued_obs[queue_id].append(obs.to_json())
for queue_id, values in queued_obs.items():
# enqueue values for each queue
queue = self.task.app.data_queues[queue_id]
queue.enqueue(values, pipe=pipe)
def emit_metrics(self, api_keys_known, metrics):
for api_key, key_metrics in metrics.items():
api_tag = []
if api_key and api_key in api_keys_known:
api_tag = ["key:%s" % api_key]
for name, count in key_metrics.items():
if not count:
continue
type_, action = name.split("_")
if type_ == "report":
suffix = "report"
tags = api_tag
else:
suffix = "observation"
tags = ["type:%s" % type_] + api_tag
METRICS.incr("data.%s.%s" % (suffix, action), count, tags=tags)
def process_report(self, data):
report = Report.create(**data)
if report is None:
return ({}, {})
malformed = {}
observations = {}
for name, report_cls, obs_cls in (
("blue", BlueReport, BlueObservation),
("cell", CellReport, CellObservation),
("wifi", WifiReport, WifiObservation),
):
malformed[name] = 0
observations[name] = {}
if data.get(name):
for item in data[name]:
# validate the blue/cell/wifi specific fields
item_report = report_cls.create(**item)
if item_report is None:
malformed[name] += 1
continue
# combine general and specific report data into one
item_obs = obs_cls.combine(report, item_report)
item_key = item_obs.unique_key
# if we have better data for the same key, ignore
existing = observations[name].get(item_key)
if existing is not None and existing.better(item_obs):
continue
observations[name][item_key] = item_obs
obs = {
"blue": observations["blue"].values(),
"cell": observations["cell"].values(),
"wifi": observations["wifi"].values(),
}
return (obs, malformed)
def process_datamap(self, pipe, positions):
grids = set()
for lat, lon in positions:
if lat is not None and lon is not None:
grids.add(DataMap.scale(lat, lon))
shards = defaultdict(set)
for lat, lon in grids:
shards[DataMap.shard_id(lat, lon)].add(encode_datamap_grid(lat, lon))
for shard_id, values in shards.items():
queue = self.task.app.data_queues["update_datamap_" + shard_id]
queue.enqueue(list(values), pipe=pipe)
|