import re
import os
import sys
import time
import types
import getopt
import unittest
import traceback
try:
# Python >=2.7 and >=3.2
from unittest.runner import _TextTestResult
except ImportError:
from unittest import _TextTestResult
__metaclass__ = type
def stderr(text):
sys.stderr.write(text)
sys.stderr.write("\n")
class Options:
"""Configurable properties of the test runner."""
# test location
basedir = '' # base directory for tests (defaults to
# basedir of argv[0] + 'src'), must be absolute
follow_symlinks = True # should symlinks to subdirectories be
# followed? (hardcoded, may cause loops)
# which tests to run
unit_tests = False # unit tests (default if both are false)
functional_tests = False # functional tests
# test filtering
level = 1 # run only tests at this or lower level
# (if None, runs all tests)
pathname_regex = '' # regexp for filtering filenames
test_regex = '' # regexp for filtering test cases
# actions to take
list_files = False # --list-files
list_tests = False # --list-tests
list_hooks = False # --list-hooks
run_tests = True # run tests (disabled by --list-foo)
# output verbosity
verbosity = 0 # verbosity level (-v)
quiet = 0 # do not print anything on success (-q)
warn_omitted = False # produce warnings when a test case is
# not included in a test suite (-w)
progress = False # show running progress (-p)
coverage = False # produce coverage reports (--coverage)
coverdir = 'coverage' # where to put them (currently hardcoded)
immediate_errors = False # show tracebacks twice (currently hardcoded)
screen_width = 80 # screen width (autodetected)
def compile_matcher(regex):
"""Returns a function that takes one argument and returns True or False.
Regex is a regular expression. An empty regex matches everything. There
is one exception: if the regex starts with "!", its meaning is
reversed.
"""
if not regex:
return lambda x: True
elif regex == '!':
return lambda x: False
elif regex.startswith('!'):
rx = re.compile(regex[1:])
return lambda x: rx.search(x) is None
else:
rx = re.compile(regex)
return lambda x: rx.search(x) is not None
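# A few illustrative examples of the matcher semantics described above
# (shown only as a sketch, not executed at import time):
#
#     compile_matcher('')('anything/at/all.py')      # True: empty regex matches everything
#     compile_matcher('ftests')('pkg/ftests.py')     # True: plain regex is searched
#     compile_matcher('!ftests')('pkg/ftests.py')    # False: leading "!" negates the match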
def walk_with_symlinks(top, func, arg):
"""Like os.path.walk, but follows symlinks on POSIX systems.
If the symlinks create a loop, this function will never finish.
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = os.path.join(top, name)
if os.path.isdir(name):
walk_with_symlinks(name, func, arg)
def get_test_files(cfg):
"""Returns a list of test module filenames."""
matcher = compile_matcher(cfg.pathname_regex)
results = []
test_names = []
if cfg.unit_tests:
test_names.append('tests')
if cfg.functional_tests:
test_names.append('ftests')
baselen = len(cfg.basedir) + 1
def visit(ignored, dir, files):
if os.path.basename(dir) not in test_names:
for name in test_names:
if name + '.py' in files:
path = os.path.join(dir, name + '.py')
if matcher(path[baselen:]):
results.append(path)
return
if '__init__.py' not in files:
stderr("%s is not a package" % dir)
return
for file in files:
if file.startswith('test') and file.endswith('.py'):
path = os.path.join(dir, file)
if matcher(path[baselen:]):
results.append(path)
if cfg.follow_symlinks:
walker = walk_with_symlinks
else:
walker = os.path.walk
walker(cfg.basedir, visit, None)
results.sort()
return results
def import_module(filename, cfg, cov=None):
"""Imports and returns a module."""
filename = os.path.splitext(filename)[0]
modname = filename[len(cfg.basedir):].replace(os.path.sep, '.')
if modname.startswith('.'):
modname = modname[1:]
if cov is not None:
cov.start()
mod = __import__(modname)
if cov is not None:
cov.stop()
components = modname.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
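# Sketch of the filename -> dotted module name mapping performed above,
# assuming a hypothetical cfg.basedir of '/project/src':
#
#     '/project/src/mypkg/tests.py'  ->  __import__('mypkg.tests')
#
# The basedir prefix and the '.py' suffix are stripped, path separators
# become dots, and nested components are resolved via getattr on the
# top-level package.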
def filter_testsuite(suite, matcher, level=None):
"""Returns a flattened list of test cases that match the given matcher."""
if not isinstance(suite, unittest.TestSuite):
raise TypeError('not a TestSuite', suite)
results = []
for test in suite._tests:
if level is not None and getattr(test, 'level', 0) > level:
continue
if isinstance(test, unittest.TestCase):
testname = test.id() # package.module.class.method
if matcher(testname):
results.append(test)
else:
filtered = filter_testsuite(test, matcher, level)
results.extend(filtered)
return results
def get_all_test_cases(module):
"""Returns a list of all test case classes defined in a given module."""
results = []
for name in dir(module):
if not name.startswith('Test'):
continue
item = getattr(module, name)
# types.ClassType (old-style classes) does not exist on Python 3
if (isinstance(item, (type, getattr(types, 'ClassType', type))) and
issubclass(item, unittest.TestCase)):
results.append(item)
return results
def get_test_classes_from_testsuite(suite):
"""Returns a set of test case classes used in a test suite."""
if not isinstance(suite, unittest.TestSuite):
raise TypeError('not a TestSuite', suite)
results = set()
for test in suite._tests:
if isinstance(test, unittest.TestCase):
results.add(test.__class__)
else:
classes = get_test_classes_from_testsuite(test)
results.update(classes)
return results
def get_test_cases(test_files, cfg, cov=None):
"""Returns a list of test cases from a given list of test modules."""
matcher = compile_matcher(cfg.test_regex)
results = []
for file in test_files:
module = import_module(file, cfg, cov=cov)
if cov is not None:
cov.start()
test_suite = module.test_suite()
if cov is not None:
cov.stop()
if test_suite is None:
continue
if cfg.warn_omitted:
all_classes = set(get_all_test_cases(module))
classes_in_suite = get_test_classes_from_testsuite(test_suite)
difference = all_classes - classes_in_suite
for test_class in difference:
# surround the warning with blank lines, otherwise it tends
# to get lost in the noise
stderr("\n%s: WARNING: %s not in test suite\n"
% (file, test_class.__name__))
if (cfg.level is not None and
getattr(test_suite, 'level', 0) > cfg.level):
continue
filtered = filter_testsuite(test_suite, matcher, cfg.level)
results.extend(filtered)
return results
def get_test_hooks(test_files, cfg, cov=None):
"""Returns a list of test hooks from a given list of test modules."""
results = []
dirs = set(map(os.path.dirname, test_files))
for dir in list(dirs):
if os.path.basename(dir) == 'ftests':
dirs.add(os.path.join(os.path.dirname(dir), 'tests'))
dirs = list(dirs)
dirs.sort()
for dir in dirs:
filename = os.path.join(dir, 'checks.py')
if os.path.exists(filename):
module = import_module(filename, cfg, cov=cov)
if cov is not None:
cov.start()
hooks = module.test_hooks()
if cov is not None:
cov.stop()
results.extend(hooks)
return results
class CustomTestResult(_TextTestResult):
"""Customised TestResult.
It can show a progress bar, and displays tracebacks for errors and failures
as soon as they happen, in addition to listing them all at the end.
"""
__super = _TextTestResult
__super_init = __super.__init__
__super_startTest = __super.startTest
__super_stopTest = __super.stopTest
__super_printErrors = __super.printErrors
def __init__(self, stream, descriptions, verbosity, count, cfg, hooks):
self.__super_init(stream, descriptions, verbosity)
self.count = count
self.cfg = cfg
self.hooks = hooks
if cfg.progress:
self.dots = False
self._lastWidth = 0
self._maxWidth = cfg.screen_width - len("xxxx/xxxx (xxx.x%): ") - 1
def startTest(self, test):
if self.cfg.progress:
# verbosity == 0: 'xxxx/xxxx (xxx.x%)'
# verbosity == 1: 'xxxx/xxxx (xxx.x%): test name'
# verbosity >= 2: 'xxxx/xxxx (xxx.x%): test name ... ok'
n = self.testsRun + 1
self.stream.write("\r%4d" % n)
if self.count:
self.stream.write("/%d (%5.1f%%)"
% (self.count, n * 100.0 / self.count))
if self.showAll: # self.cfg.verbosity == 1
self.stream.write(": ")
elif self.cfg.verbosity:
name = self.getShortDescription(test)
width = len(name)
if width < self._lastWidth:
name += " " * (self._lastWidth - width)
self.stream.write(": %s" % name)
self._lastWidth = width
self.stream.flush()
self.__super_startTest(test)
for hook in self.hooks:
hook.startTest(test)
def stopTest(self, test):
for hook in self.hooks:
hook.stopTest(test)
self.__super_stopTest(test)
def getShortDescription(self, test):
s = self.getDescription(test)
if len(s) > self._maxWidth:
# s is 'testname (package.module.class)'
# try to shorten it to 'testname (...age.module.class)'
# if it is still too long, shorten it to 'testnam...'
# limit case is 'testname (...)'
pos = s.find(" (")
if pos + len(" (...)") > self._maxWidth:
s = s[:self._maxWidth - 3] + "..."
else:
s = "%s...%s" % (s[:pos + 2], s[pos + 5 - self._maxWidth:])
return s
def printErrors(self):
if self.cfg.progress and not (self.dots or self.showAll):
self.stream.writeln()
self.__super_printErrors()
def formatError(self, err):
return "".join(traceback.format_exception(*err))
def printTraceback(self, kind, test, err):
self.stream.writeln()
self.stream.writeln()
self.stream.writeln("%s: %s" % (kind, test))
self.stream.writeln(self.formatError(err))
self.stream.writeln()
def addFailure(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("FAIL", test, err)
self.failures.append((test, self.formatError(err)))
def addError(self, test, err):
if self.cfg.immediate_errors:
self.printTraceback("ERROR", test, err)
self.errors.append((test, self.formatError(err)))
class CustomTestRunner(unittest.TextTestRunner):
"""Customised TestRunner.
See CustomTestResult for a list of extensions.
"""
__super = unittest.TextTestRunner
__super_init = __super.__init__
__super_run = __super.run
def __init__(self, cfg, hooks=None):
self.__super_init(verbosity=cfg.verbosity)
self.cfg = cfg
if hooks is not None:
self.hooks = hooks
else:
self.hooks = []
def run(self, test):
"""Run the given test case or test suite."""
self.count = test.countTestCases()
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = float(stopTime - startTime)
result.printErrors()
run = result.testsRun
if not self.cfg.quiet:
self.stream.writeln(result.separator2)
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = list(map(len, (result.failures, result.errors)))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
elif not self.cfg.quiet:
self.stream.writeln("OK")
return result
def _makeResult(self):
return CustomTestResult(self.stream, self.descriptions, self.verbosity,
cfg=self.cfg, count=self.count,
hooks=self.hooks)
def main(argv):
"""Main program."""
# Environment
if sys.version_info < (2, 7):
stderr('%s: need Python 2.7 or later' % argv[0])
stderr('your python is %s' % sys.version)
return 1
# Defaults
cfg = Options()
cfg.basedir = os.path.join(os.path.dirname(argv[0]), 'src')
cfg.basedir = os.path.abspath(cfg.basedir)
# Figure out terminal size
try:
import curses
except ImportError:
pass
else:
try:
curses.setupterm()
cols = curses.tigetnum('cols')
if cols > 0:
cfg.screen_width = cols
except (curses.error, TypeError):
# tigetnum() is broken in PyPy3 and raises TypeError
pass
# Option processing
opts, args = getopt.gnu_getopt(argv[1:], 'hvpqufw',
['list-files', 'list-tests', 'list-hooks',
'level=', 'all-levels', 'coverage'])
for k, v in opts:
if k == '-h':
print(__doc__)
return 0
elif k == '-v':
cfg.verbosity += 1
cfg.quiet = False
elif k == '-p':
cfg.progress = True
cfg.quiet = False
elif k == '-q':
cfg.verbosity = 0
cfg.progress = False
cfg.quiet = True
elif k == '-u':
cfg.unit_tests = True
elif k == '-f':
cfg.functional_tests = True
elif k == '-w':
cfg.warn_omitted = True
elif k == '--list-files':
cfg.list_files = True
cfg.run_tests = False
elif k == '--list-tests':
cfg.list_tests = True
cfg.run_tests = False
elif k == '--list-hooks':
cfg.list_hooks = True
cfg.run_tests = False
elif k == '--coverage':
cfg.coverage = True
elif k == '--level':
try:
cfg.level = int(v)
except ValueError:
stderr('%s: invalid level: %s' % (argv[0], v))
stderr('run %s -h for help' % argv[0])
return 1
elif k == '--all-levels':
cfg.level = None
else:
stderr('%s: invalid option: %s' % (argv[0], k))
stderr('run %s -h for help' % argv[0])
return 1
if args:
cfg.pathname_regex = args[0]
if len(args) > 1:
cfg.test_regex = args[1]
if len(args) > 2:
stderr('%s: too many arguments: %s' % (argv[0], args[2]))
stderr('run %s -h for help' % argv[0])
return 1
if not cfg.unit_tests and not cfg.functional_tests:
cfg.unit_tests = True
# Set up the python path
sys.path[0] = cfg.basedir
# Set up tracing before we start importing things
cov = None
if cfg.run_tests and cfg.coverage:
from coverage import coverage
cov = coverage(omit=['test.py'])
# Finding and importing
test_files = get_test_files(cfg)
if cov is not None:
cov.start()
if cfg.list_tests or cfg.run_tests:
test_cases = get_test_cases(test_files, cfg, cov=cov)
if cfg.list_hooks or cfg.run_tests:
test_hooks = get_test_hooks(test_files, cfg, cov=cov)
# Configure the logging module
import logging
logging.basicConfig()
logging.root.setLevel(logging.CRITICAL)
# Running
success = True
if cfg.list_files:
baselen = len(cfg.basedir) + 1
print("\n".join([fn[baselen:] for fn in test_files]))
if cfg.list_tests:
print("\n".join([test.id() for test in test_cases]))
if cfg.list_hooks:
print("\n".join([str(hook) for hook in test_hooks]))
if cfg.run_tests:
runner = CustomTestRunner(cfg, test_hooks)
suite = unittest.TestSuite()
suite.addTests(test_cases)
if cov is not None:
cov.start()
run_result = runner.run(suite)
if cov is not None:
cov.stop()
success = run_result.wasSuccessful()
del run_result
if cov is not None:
traced_file_types = ('.py', '.pyx', '.pxi', '.pxd')
modules = []
def add_file(_, path, files):
if 'tests' in os.path.relpath(path, cfg.basedir).split(os.sep):
return
for filename in files:
if filename.endswith(traced_file_types):
modules.append(os.path.join(path, filename))
if cfg.follow_symlinks:
walker = walk_with_symlinks
else:
walker = os.path.walk
walker(os.path.abspath(cfg.basedir), add_file, None)
try:
cov.xml_report(modules, outfile='coverage.xml')
if cfg.coverdir:
cov.html_report(modules, directory=cfg.coverdir)
finally:
# test runs can take a while, so at least try to print something
cov.report()
# That's all
if success:
return 0
else:
return 1
if __name__ == '__main__':
exitcode = main(sys.argv)
sys.exit(exitcode)
|
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR_DOMAIN,
BinarySensorEntity,
)
from homeassistant.core import callback
from homeassistant.util import slugify
from . import DOMAIN as MYCHEVY_DOMAIN, UPDATE_TOPIC, EVBinarySensorConfig
SENSORS = [EVBinarySensorConfig("Plugged In", "plugged_in", "plug")]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the MyChevy sensors."""
if discovery_info is None:
return
sensors = []
hub = hass.data[MYCHEVY_DOMAIN]
for sconfig in SENSORS:
for car in hub.cars:
sensors.append(EVBinarySensor(hub, sconfig, car.vid))
async_add_entities(sensors)
class EVBinarySensor(BinarySensorEntity):
"""Base EVSensor class.
The only real difference between sensors is which attribute of the car
object they return. All the logic can be built up just by setting
subclass attributes.
"""
def __init__(self, connection, config, car_vid):
"""Initialize sensor with car connection."""
self._conn = connection
self._name = config.name
self._attr = config.attr
self._type = config.device_class
self._is_on = None
self._car_vid = car_vid
self.entity_id = f"{BINARY_SENSOR_DOMAIN}.{MYCHEVY_DOMAIN}_{slugify(self._car.name)}_{slugify(self._name)}"
@property
def name(self):
"""Return the name."""
return self._name
@property
def is_on(self):
"""Return if on."""
return self._is_on
@property
def _car(self):
"""Return the car."""
return self._conn.get_car(self._car_vid)
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
UPDATE_TOPIC, self.async_update_callback
)
)
@callback
def async_update_callback(self):
"""Update state."""
if self._car is not None:
self._is_on = getattr(self._car, self._attr, None)
self.async_write_ha_state()
@property
def should_poll(self):
"""Return the polling state."""
return False
|
import pathlib
import re
from typing import Dict
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util.yaml import load_yaml
from .model import Integration
def exists(value):
"""Check if value exists."""
if value is None:
raise vol.Invalid("Value cannot be None")
return value
FIELD_SCHEMA = vol.Schema(
{
vol.Required("description"): str,
vol.Optional("example"): exists,
vol.Optional("default"): exists,
vol.Optional("values"): exists,
vol.Optional("required"): bool,
}
)
SERVICE_SCHEMA = vol.Schema(
{
vol.Required("description"): str,
vol.Optional("fields"): vol.Schema({str: FIELD_SCHEMA}),
}
)
SERVICES_SCHEMA = vol.Schema({cv.slug: SERVICE_SCHEMA})
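# For illustration only, a services.yaml snippet of the shape accepted by
# SERVICES_SCHEMA above (service and field names are made up):
#
#   set_speed:
#     description: Set the fan speed.
#     fields:
#       speed:
#         description: Target speed.
#         example: high
#         required: true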
def grep_dir(path: pathlib.Path, glob_pattern: str, search_pattern: str) -> bool:
"""Recursively go through a dir and it's children and find the regex."""
pattern = re.compile(search_pattern)
for fil in path.glob(glob_pattern):
if not fil.is_file():
continue
if pattern.search(fil.read_text()):
return True
return False
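# Hypothetical usage sketch: returns True if any .py file under the given
# integration directory registers a service, e.g.
#
#     grep_dir(pathlib.Path('homeassistant/components/fan'), '**/*.py',
#              r'hass\.services\.(register|async_register)')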
def validate_services(integration: Integration):
"""Validate services."""
# Find if integration uses services
has_services = grep_dir(
integration.path, "**/*.py", r"hass\.services\.(register|async_register)"
)
if not has_services:
return
try:
data = load_yaml(str(integration.path / "services.yaml"))
except FileNotFoundError:
integration.add_error("services", "Registers services but has no services.yaml")
return
except HomeAssistantError:
integration.add_error(
"services", "Registers services but unable to load services.yaml"
)
return
try:
SERVICES_SCHEMA(data)
except vol.Invalid as err:
integration.add_error(
"services", f"Invalid services.yaml: {humanize_error(data, err)}"
)
def validate(integrations: Dict[str, Integration], config):
"""Handle dependencies for integrations."""
# check services.yaml is cool
for integration in integrations.values():
if not integration.manifest:
continue
validate_services(integration)
|
import asyncio
import datetime as dt
import os
from typing import List
from httpx import RequestError
import onvif
from onvif import ONVIFCamera
from onvif.exceptions import ONVIFError
from zeep.exceptions import Fault
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.util.dt as dt_util
from .const import (
ABSOLUTE_MOVE,
CONTINUOUS_MOVE,
GOTOPRESET_MOVE,
LOGGER,
PAN_FACTOR,
RELATIVE_MOVE,
TILT_FACTOR,
ZOOM_FACTOR,
)
from .event import EventManager
from .models import PTZ, Capabilities, DeviceInfo, Profile, Resolution, Video
class ONVIFDevice:
"""Manages an ONVIF device."""
def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry = None):
"""Initialize the device."""
self.hass: HomeAssistant = hass
self.config_entry: ConfigEntry = config_entry
self.available: bool = True
self.device: ONVIFCamera = None
self.events: EventManager = None
self.info: DeviceInfo = DeviceInfo()
self.capabilities: Capabilities = Capabilities()
self.profiles: List[Profile] = []
self.max_resolution: int = 0
self._dt_diff_seconds: int = 0
@property
def name(self) -> str:
"""Return the name of this device."""
return self.config_entry.data[CONF_NAME]
@property
def host(self) -> str:
"""Return the host of this device."""
return self.config_entry.data[CONF_HOST]
@property
def port(self) -> int:
"""Return the port of this device."""
return self.config_entry.data[CONF_PORT]
@property
def username(self) -> str:
"""Return the username of this device."""
return self.config_entry.data[CONF_USERNAME]
@property
def password(self) -> str:
"""Return the password of this device."""
return self.config_entry.data[CONF_PASSWORD]
async def async_setup(self) -> bool:
"""Set up the device."""
self.device = get_device(
self.hass,
host=self.config_entry.data[CONF_HOST],
port=self.config_entry.data[CONF_PORT],
username=self.config_entry.data[CONF_USERNAME],
password=self.config_entry.data[CONF_PASSWORD],
)
# Get all device info
try:
await self.device.update_xaddrs()
await self.async_check_date_and_time()
# Create event manager
self.events = EventManager(
self.hass, self.device, self.config_entry.unique_id
)
# Fetch basic device info and capabilities
self.info = await self.async_get_device_info()
self.capabilities = await self.async_get_capabilities()
self.profiles = await self.async_get_profiles()
# No camera profiles to add
if not self.profiles:
return False
if self.capabilities.ptz:
self.device.create_ptz_service()
# Determine max resolution from profiles
self.max_resolution = max(
profile.video.resolution.width
for profile in self.profiles
if profile.video.encoding == "H264"
)
except RequestError as err:
LOGGER.warning(
"Couldn't connect to camera '%s', but will retry later. Error: %s",
self.name,
err,
)
self.available = False
except Fault as err:
LOGGER.error(
"Couldn't connect to camera '%s', please verify "
"that the credentials are correct. Error: %s",
self.name,
err,
)
return False
return True
async def async_stop(self, event=None):
"""Shut it all down."""
if self.events:
await self.events.async_stop()
await self.device.close()
async def async_check_date_and_time(self) -> None:
"""Warns if device and system date not synced."""
LOGGER.debug("Setting up the ONVIF device management service")
device_mgmt = self.device.create_devicemgmt_service()
LOGGER.debug("Retrieving current device date/time")
try:
system_date = dt_util.utcnow()
device_time = await device_mgmt.GetSystemDateAndTime()
if not device_time:
LOGGER.debug(
"""Couldn't get device '%s' date/time.
GetSystemDateAndTime() return null/empty""",
self.name,
)
return
if device_time.UTCDateTime:
tzone = dt_util.UTC
cdate = device_time.UTCDateTime
else:
tzone = (
dt_util.get_time_zone(device_time.TimeZone)
or dt_util.DEFAULT_TIME_ZONE
)
cdate = device_time.LocalDateTime
if cdate is None:
LOGGER.warning("Could not retrieve date/time on this camera")
else:
cam_date = dt.datetime(
cdate.Date.Year,
cdate.Date.Month,
cdate.Date.Day,
cdate.Time.Hour,
cdate.Time.Minute,
cdate.Time.Second,
0,
tzone,
)
cam_date_utc = cam_date.astimezone(dt_util.UTC)
LOGGER.debug(
"Device date/time: %s | System date/time: %s",
cam_date_utc,
system_date,
)
dt_diff = cam_date - system_date
self._dt_diff_seconds = dt_diff.total_seconds()
if self._dt_diff_seconds > 5:
LOGGER.warning(
"The date/time on the device (UTC) is '%s', "
"which is different from the system '%s', "
"this could lead to authentication issues",
cam_date_utc,
system_date,
)
except RequestError as err:
LOGGER.warning(
"Couldn't get device '%s' date/time. Error: %s", self.name, err
)
async def async_get_device_info(self) -> DeviceInfo:
"""Obtain information about this device."""
device_mgmt = self.device.create_devicemgmt_service()
device_info = await device_mgmt.GetDeviceInformation()
# Grab the last MAC address for backwards compatibility
mac = None
try:
network_interfaces = await device_mgmt.GetNetworkInterfaces()
for interface in network_interfaces:
if interface.Enabled:
mac = interface.Info.HwAddress
except Fault as fault:
if "not implemented" not in fault.message:
raise fault
LOGGER.debug(
"Couldn't get network interfaces from ONVIF device '%s'. Error: %s",
self.name,
fault,
)
return DeviceInfo(
device_info.Manufacturer,
device_info.Model,
device_info.FirmwareVersion,
device_info.SerialNumber,
mac,
)
async def async_get_capabilities(self):
"""Obtain information about the available services on the device."""
snapshot = False
try:
media_service = self.device.create_media_service()
media_capabilities = await media_service.GetServiceCapabilities()
snapshot = media_capabilities and media_capabilities.SnapshotUri
except (ONVIFError, Fault, RequestError):
pass
pullpoint = False
try:
pullpoint = await self.events.async_start()
except (ONVIFError, Fault):
pass
ptz = False
try:
self.device.get_definition("ptz")
ptz = True
except ONVIFError:
pass
return Capabilities(snapshot, pullpoint, ptz)
async def async_get_profiles(self) -> List[Profile]:
"""Obtain media profiles for this device."""
media_service = self.device.create_media_service()
result = await media_service.GetProfiles()
profiles = []
if not isinstance(result, list):
return profiles
for key, onvif_profile in enumerate(result):
# Only add H264 profiles
if (
not onvif_profile.VideoEncoderConfiguration
or onvif_profile.VideoEncoderConfiguration.Encoding != "H264"
):
continue
profile = Profile(
key,
onvif_profile.token,
onvif_profile.Name,
Video(
onvif_profile.VideoEncoderConfiguration.Encoding,
Resolution(
onvif_profile.VideoEncoderConfiguration.Resolution.Width,
onvif_profile.VideoEncoderConfiguration.Resolution.Height,
),
),
)
# Configure PTZ options
if self.capabilities.ptz and onvif_profile.PTZConfiguration:
profile.ptz = PTZ(
onvif_profile.PTZConfiguration.DefaultContinuousPanTiltVelocitySpace
is not None,
onvif_profile.PTZConfiguration.DefaultRelativePanTiltTranslationSpace
is not None,
onvif_profile.PTZConfiguration.DefaultAbsolutePantTiltPositionSpace
is not None,
)
try:
ptz_service = self.device.create_ptz_service()
presets = await ptz_service.GetPresets(profile.token)
profile.ptz.presets = [preset.token for preset in presets if preset]
except (Fault, RequestError):
# It's OK if Presets aren't supported
profile.ptz.presets = []
profiles.append(profile)
return profiles
async def async_get_stream_uri(self, profile: Profile) -> str:
"""Get the stream URI for a specified profile."""
media_service = self.device.create_media_service()
req = media_service.create_type("GetStreamUri")
req.ProfileToken = profile.token
req.StreamSetup = {
"Stream": "RTP-Unicast",
"Transport": {"Protocol": "RTSP"},
}
result = await media_service.GetStreamUri(req)
return result.Uri
async def async_perform_ptz(
self,
profile: Profile,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan=None,
tilt=None,
zoom=None,
):
"""Perform a PTZ action on the camera."""
if not self.capabilities.ptz:
LOGGER.warning("PTZ actions are not supported on device '%s'", self.name)
return
ptz_service = self.device.create_ptz_service()
pan_val = distance * PAN_FACTOR.get(pan, 0)
tilt_val = distance * TILT_FACTOR.get(tilt, 0)
zoom_val = distance * ZOOM_FACTOR.get(zoom, 0)
speed_val = speed
preset_val = preset
LOGGER.debug(
"Calling %s PTZ | Pan = %4.2f | Tilt = %4.2f | Zoom = %4.2f | Speed = %4.2f | Preset = %s",
move_mode,
pan_val,
tilt_val,
zoom_val,
speed_val,
preset_val,
)
try:
req = ptz_service.create_type(move_mode)
req.ProfileToken = profile.token
if move_mode == CONTINUOUS_MOVE:
# Guard against unsupported operation
if not profile.ptz.continuous:
LOGGER.warning(
"ContinuousMove not supported on device '%s'", self.name
)
return
req.Velocity = {
"PanTilt": {"x": pan_val, "y": tilt_val},
"Zoom": {"x": zoom_val},
}
await ptz_service.ContinuousMove(req)
await asyncio.sleep(continuous_duration)
req = ptz_service.create_type("Stop")
req.ProfileToken = profile.token
await ptz_service.Stop(
{"ProfileToken": req.ProfileToken, "PanTilt": True, "Zoom": False}
)
elif move_mode == RELATIVE_MOVE:
# Guard against unsupported operation
if not profile.ptz.relative:
LOGGER.warning(
"RelativeMove not supported on device '%s'", self.name
)
return
req.Translation = {
"PanTilt": {"x": pan_val, "y": tilt_val},
"Zoom": {"x": zoom_val},
}
req.Speed = {
"PanTilt": {"x": speed_val, "y": speed_val},
"Zoom": {"x": speed_val},
}
await ptz_service.RelativeMove(req)
elif move_mode == ABSOLUTE_MOVE:
# Guard against unsupported operation
if not profile.ptz.absolute:
LOGGER.warning(
"AbsoluteMove not supported on device '%s'", self.name
)
return
req.Position = {
"PanTilt": {"x": pan_val, "y": tilt_val},
"Zoom": {"x": zoom_val},
}
req.Speed = {
"PanTilt": {"x": speed_val, "y": speed_val},
"Zoom": {"x": speed_val},
}
await ptz_service.AbsoluteMove(req)
elif move_mode == GOTOPRESET_MOVE:
# Guard against unsupported operation
if preset_val not in profile.ptz.presets:
LOGGER.warning(
"PTZ preset '%s' does not exist on device '%s'. Available Presets: %s",
preset_val,
self.name,
", ".join(profile.ptz.presets),
)
return
req.PresetToken = preset_val
req.Speed = {
"PanTilt": {"x": speed_val, "y": speed_val},
"Zoom": {"x": speed_val},
}
await ptz_service.GotoPreset(req)
except ONVIFError as err:
if "Bad Request" in err.reason:
LOGGER.warning("Device '%s' doesn't support PTZ.", self.name)
else:
LOGGER.error("Error trying to perform PTZ action: %s", err)
def get_device(hass, host, port, username, password) -> ONVIFCamera:
"""Get ONVIFCamera instance."""
return ONVIFCamera(
host,
port,
username,
password,
f"{os.path.dirname(onvif.__file__)}/wsdl/",
no_cache=True,
)
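# Minimal usage sketch (assumes this runs inside Home Assistant's event loop
# and that `entry` is a ConfigEntry whose data contains the CONF_* keys used above):
#
#     device = ONVIFDevice(hass, entry)
#     if await device.async_setup():
#         uri = await device.async_get_stream_uri(device.profiles[0])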
|
import sys
import mne
def clean_ecg_eog(in_fif_fname, out_fif_fname=None, eog=True, ecg=True,
ecg_proj_fname=None, eog_proj_fname=None,
ecg_event_fname=None, eog_event_fname=None, in_path='.',
quiet=False):
"""Clean ECG from raw fif file.
Parameters
----------
in_fif_fname : str
Raw fif File
eog_event_fname : str
Name of the EOG event file to write.
eog : bool
Whether to reject EOG artifacts.
ecg : bool
Whether to reject ECG artifacts.
ecg_event_fname : str
Name of the ECG event file to write.
in_path : str
Path where all the files are.
"""
if not eog and not ecg:
raise Exception("EOG and ECG cannot be both disabled")
# Reading fif File
raw_in = mne.io.read_raw_fif(in_fif_fname)
if in_fif_fname.endswith('_raw.fif') or in_fif_fname.endswith('-raw.fif'):
prefix = in_fif_fname[:-8]
else:
prefix = in_fif_fname[:-4]
if out_fif_fname is None:
out_fif_fname = prefix + '_clean_ecg_eog_raw.fif'
if ecg_proj_fname is None:
ecg_proj_fname = prefix + '_ecg-proj.fif'
if eog_proj_fname is None:
eog_proj_fname = prefix + '_eog-proj.fif'
if ecg_event_fname is None:
ecg_event_fname = prefix + '_ecg-eve.fif'
if eog_event_fname is None:
eog_event_fname = prefix + '_eog-eve.fif'
print('Implementing ECG and EOG artifact rejection on data')
kwargs = dict() if quiet else dict(stdout=None, stderr=None)
if ecg:
ecg_events, _, _ = mne.preprocessing.find_ecg_events(
raw_in, reject_by_annotation=True)
print("Writing ECG events in %s" % ecg_event_fname)
mne.write_events(ecg_event_fname, ecg_events)
print('Computing ECG projector')
command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
'--events', ecg_event_fname, '--makeproj',
'--projtmin', '-0.08', '--projtmax', '0.08',
'--saveprojtag', '_ecg-proj', '--projnmag', '2',
'--projngrad', '1', '--projevent', '999', '--highpass', '5',
'--lowpass', '35', '--projmagrej', '4000',
'--projgradrej', '3000')
mne.utils.run_subprocess(command, **kwargs)
if eog:
eog_events = mne.preprocessing.find_eog_events(raw_in)
print("Writing EOG events in %s" % eog_event_fname)
mne.write_events(eog_event_fname, eog_events)
print('Computing EOG projector')
command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
'--events', eog_event_fname, '--makeproj',
'--projtmin', '-0.15', '--projtmax', '0.15',
'--saveprojtag', '_eog-proj', '--projnmag', '2',
'--projngrad', '2', '--projevent', '998', '--lowpass', '35',
'--projmagrej', '4000', '--projgradrej', '3000')
mne.utils.run_subprocess(command, **kwargs)
if out_fif_fname is not None:
# Applying the ECG EOG projector
print('Applying ECG EOG projector')
command = ('mne_process_raw', '--cd', in_path, '--raw', in_fif_fname,
'--proj', in_fif_fname, '--projoff', '--save',
out_fif_fname, '--filteroff',
'--proj', ecg_proj_fname, '--proj', eog_proj_fname)
mne.utils.run_subprocess(command, **kwargs)
print('Done removing artifacts.')
print("Cleaned raw data saved in: %s" % out_fif_fname)
print('IMPORTANT : Please eye-ball the data !!')
else:
print('Projection not applied to raw data.')
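# Minimal usage sketch (assumes 'sample_raw.fif' exists and that the MNE-C
# 'mne_process_raw' binary invoked above is available on the PATH):
#
#     clean_ecg_eog('sample_raw.fif', quiet=True)
#     # writes sample_clean_ecg_eog_raw.fif plus the *-proj.fif / *-eve.fif files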
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-i", "--in", dest="raw_in",
help="Input raw FIF file", metavar="FILE")
parser.add_option("-o", "--out", dest="raw_out",
help="Output raw FIF file", metavar="FILE",
default=None)
parser.add_option("-e", "--no-eog", dest="eog", action="store_false",
help="Remove EOG", default=True)
parser.add_option("-c", "--no-ecg", dest="ecg", action="store_false",
help="Remove ECG", default=True)
parser.add_option("-q", "--quiet", dest="quiet", action="store_true",
help="Suppress mne_process_raw output", default=False)
options, args = parser.parse_args()
if options.raw_in is None:
parser.print_help()
sys.exit(1)
raw_in = options.raw_in
raw_out = options.raw_out
eog = options.eog
ecg = options.ecg
quiet = options.quiet
clean_ecg_eog(raw_in, raw_out, eog=eog, ecg=ecg, quiet=quiet)
mne.utils.run_command_if_main()
|
from datetime import timedelta
import logging
import aiodns
from aiodns.error import DNSError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_HOSTNAME = "hostname"
CONF_IPV6 = "ipv6"
CONF_RESOLVER = "resolver"
CONF_RESOLVER_IPV6 = "resolver_ipv6"
DEFAULT_HOSTNAME = "myip.opendns.com"
DEFAULT_IPV6 = False
DEFAULT_NAME = "myip"
DEFAULT_RESOLVER = "208.67.222.222"
DEFAULT_RESOLVER_IPV6 = "2620:0:ccc::2"
SCAN_INTERVAL = timedelta(seconds=120)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_HOSTNAME, default=DEFAULT_HOSTNAME): cv.string,
vol.Optional(CONF_RESOLVER, default=DEFAULT_RESOLVER): cv.string,
vol.Optional(CONF_RESOLVER_IPV6, default=DEFAULT_RESOLVER_IPV6): cv.string,
vol.Optional(CONF_IPV6, default=DEFAULT_IPV6): cv.boolean,
}
)
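# An example configuration.yaml entry matching PLATFORM_SCHEMA above; the
# platform key is assumed here to be "dnsip", and all values shown are the
# defaults defined in this module:
#
#   sensor:
#     - platform: dnsip
#       hostname: myip.opendns.com
#       resolver: 208.67.222.222
#       ipv6: false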
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the DNS IP sensor."""
hostname = config[CONF_HOSTNAME]
name = config.get(CONF_NAME)
if not name:
if hostname == DEFAULT_HOSTNAME:
name = DEFAULT_NAME
else:
name = hostname
ipv6 = config[CONF_IPV6]
if ipv6:
resolver = config[CONF_RESOLVER_IPV6]
else:
resolver = config[CONF_RESOLVER]
async_add_devices([WanIpSensor(hass, name, hostname, resolver, ipv6)], True)
class WanIpSensor(Entity):
"""Implementation of a DNS IP sensor."""
def __init__(self, hass, name, hostname, resolver, ipv6):
"""Initialize the DNS IP sensor."""
self.hass = hass
self._name = name
self.hostname = hostname
self.resolver = aiodns.DNSResolver()
self.resolver.nameservers = [resolver]
self.querytype = "AAAA" if ipv6 else "A"
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the current DNS IP address for hostname."""
return self._state
async def async_update(self):
"""Get the current DNS IP address for hostname."""
try:
response = await self.resolver.query(self.hostname, self.querytype)
except DNSError as err:
_LOGGER.warning("Exception while resolving host: %s", err)
response = None
if response:
self._state = response[0].host
else:
self._state = None
|
import StringIO
import sys
import time
from nose import tools
from docker_registry.core import exceptions
import docker_registry.testing as testing
from docker_registry.testing import mock_boto # noqa
from . import mock_s3 # noqa
class StringIOWithError(StringIO.StringIO):
'''Throw IOError after reaching EOF.'''
def read(self, size):
if self.pos == self.len:
raise IOError('Reading beyond EOF')
return StringIO.StringIO.read(self, size)
class TestDriver(testing.Driver):
'''Extra tests for coverage completion.'''
def __init__(self):
self.scheme = 's3'
self.path = ''
self.config = testing.Config({})
def tearDown(self):
self._storage._boto_bucket.delete()
super(TestDriver, self).tearDown()
@tools.raises(exceptions.FileNotFoundError)
def test_list_bucket(self):
# Add a couple of bucket keys
filename1 = self.gen_random_string()
filename2 = self.gen_random_string()
content = self.gen_random_string()
self._storage.put_content(filename1, content)
# Check bucket key is stored in normalized form
self._storage.put_content(filename2 + '/', content)
# Check both keys are in the bucket
assert sorted([filename1, filename2]) == sorted(
list(self._storage.list_directory()))
# Check listing bucket raises exception after removing keys
self._storage.remove(filename1)
self._storage.remove(filename2)
s = self._storage.list_directory()
s.next()
def test_stream_write(self):
# Check stream write with buffer bigger than default 5MB
self._storage.buffer_size = 7 * 1024 * 1024
filename = self.gen_random_string()
# Test 8MB
content = self.gen_random_string(8 * 1024 * 1024)
io = StringIOWithError(content)
assert not self._storage.exists(filename)
try:
self._storage.stream_write(filename, io)
except IOError:
pass
assert self._storage.exists(filename)
# Test that EOFed io string throws IOError on lib/storage/s3
try:
self._storage.stream_write(filename, io)
except IOError:
pass
# Cleanup
io.close()
self._storage.remove(filename)
self._storage.buffer_size = 5 * 1024 * 1024
assert not self._storage.exists(filename)
def test_init_path(self):
# s3 storage _init_path result keys are relative (no / at start)
root_path = self._storage._root_path
if root_path.startswith('/'):
self._storage._root_path = root_path[1:]
assert not self._storage._init_path().startswith('/')
self._storage._root_path = root_path
def test_debug_key(self):
# Create a valid s3 key object to debug
filename = self.gen_random_string()
content = self.gen_random_string()
self._storage.put_content(filename, content)
# Get filename key path as stored
key_path = self._storage._init_path(filename)
key = self._storage._boto_bucket.lookup(key_path)
self._storage._debug_key(key)
# Capture debugged output
saved_stdout = sys.stdout
output = StringIO.StringIO()
sys.stdout = output
# As key is mocked for unittest purposes, we call make_request directly
dummy = "################\n('d', 1)\n{'v': 2}\n################\n"
# '{}\n{}\n{}\n{}\n'.format(
# '#' * 16, ('d', 1), {'v': 2}, '#' * 16)
result = self._storage._boto_bucket.connection.make_request(
'd', 1, v=2)
assert output.getvalue() == dummy
assert result == 'request result'
sys.stdout = saved_stdout
# We don't call self._storage.remove(filename) here so that tearDown can
# clean up properly and other tests keep running as expected.
# Validation test for docker-index#486
def test_get_tags(self):
store = self._storage
store._root_path = 'my/custom/path'
store._init_path()
assert store._root_path == 'my/custom/path'
tag_path = store.tag_path('test', 'test', '0.0.2')
store.put_content(tag_path, 'randomdata')
tags_path = store.tag_path('test', 'test')
for fname in store.list_directory(tags_path):
full_tag_name = fname.split('/').pop()
if not full_tag_name == 'tag_0.0.2':
continue
try:
store.get_content(fname)
except exceptions.FileNotFoundError:
pass
except Exception as e:
raise e
else:
assert False
tag_content = store.get_content(tag_path)
assert tag_content == 'randomdata'
def test_consistency_latency(self):
self.testCount = -1
mockKey = mock_boto.Key()
def mockExists():
self.testCount += 1
return self.testCount == 1
mockKey.exists = mockExists
mockKey.get_contents_as_string = lambda: "Foo bar"
self._storage.makeKey = lambda x: mockKey
startTime = time.time()
content = self._storage.get_content("/FOO")
waitTime = time.time() - startTime
assert waitTime >= 0.1, ("Waiting time was less than %sms "
"(actual : %sms)" %
(0.1 * 1000, waitTime * 1000))
assert content == "Foo bar", ("expected : %s; actual: %s" %
("Foo bar", content))
@tools.raises(exceptions.FileNotFoundError)
def test_too_many_read_retries(self):
self.testCount = -1
mockKey = mock_boto.Key()
def mockExists():
self.testCount += 1
return self.testCount == 5
mockKey.exists = mockExists
mockKey.get_contents_as_string = lambda: "Foo bar"
self._storage.makeKey = lambda x: mockKey
self._storage.get_content("/FOO")
|
import unittest
from absl import flags
import mock
from perfkitbenchmarker import benchmark_sets
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_benchmarks
# This import ensures the required FLAGS are defined.
from perfkitbenchmarker import pkb # NOQA
import six
import yaml
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()
USER_CONFIG = """
internal_iprf:
name: iperf
flags:
ip_addresses: INTERNAL
"""
MATRIX_CONFIG = """
netperf:
flag_matrix: GCP
flag_matrix_defs:
GCP:
machine_type: [n1-standard-1, n1-standard-4]
zones: [us-central1-a, us-central1-b]
"""
EXPECTED_MATRIX_FLAGS = [
{'machine_type': 'n1-standard-1', 'zones': 'us-central1-a'},
{'machine_type': 'n1-standard-1', 'zones': 'us-central1-b'},
{'machine_type': 'n1-standard-4', 'zones': 'us-central1-a'},
{'machine_type': 'n1-standard-4', 'zones': 'us-central1-b'}
]
ZIP_CONFIG_DIFFERENT_AXES_LENGTH = """
netperf:
flags:
gpu_type: k80
flag_zip: GCP
flag_zip_defs:
GCP:
machine_type: [n1-standard-4, n1-standard-8]
gpu_count: [1, 2, 3]
"""
ZIP_CONFIG = """
netperf:
flags:
gpu_type: k80
flag_zip: GCP
flag_zip_defs:
GCP:
machine_type: [n1-standard-4, n1-standard-8]
gpu_count: [1, 2]
"""
EXPECTED_ZIP_FLAGS = [
{'machine_type': 'n1-standard-4', 'gpu_count': 1, 'gpu_type': 'k80'},
{'machine_type': 'n1-standard-8', 'gpu_count': 2, 'gpu_type': 'k80'}
]
SINGLE_ZIP_CONFIG = """
netperf:
flags:
gpu_type: k80
flag_zip: GCP
flag_zip_defs:
GCP:
machine_type: [n1-standard-4, n1-standard-8]
"""
EXPECTED_SINGLE_ZIP_FLAGS = [
{'machine_type': 'n1-standard-4', 'gpu_type': 'k80'},
{'machine_type': 'n1-standard-8', 'gpu_type': 'k80'}
]
ZIP_AND_MATRIX_CONFIG = """
netperf:
flags:
gpu_type: k80
flag_zip: GCP
flag_matrix: GCP
flag_zip_defs:
GCP:
machine_type: [n1-standard-4, n1-standard-8]
gpu_count: [1, 2]
flag_matrix_defs:
GCP:
zones: [us-central1-a, us-central1-b]
"""
EXPECTED_ZIP_AND_MATRIX_FLAGS = [
{'zones': 'us-central1-a', 'gpu_type': 'k80',
'machine_type': 'n1-standard-4', 'gpu_count': 1},
{'zones': 'us-central1-b', 'gpu_type': 'k80',
'machine_type': 'n1-standard-4', 'gpu_count': 1},
{'zones': 'us-central1-b', 'gpu_type': 'k80',
'machine_type': 'n1-standard-8', 'gpu_count': 2},
{'zones': 'us-central1-a', 'gpu_type': 'k80',
'machine_type': 'n1-standard-8', 'gpu_count': 2}
]
FILTER_CONFIG = """
netperf:
flag_matrix: GCP
flag_matrix_filters:
GCP: "machine_type == 'n1-standard-1' and zones == 'us-central1-a'"
flag_matrix_defs:
GCP:
machine_type: [n1-standard-1, n1-standard-4]
zones: [us-central1-a, us-central1-b]
"""
FLAG_PRECEDENCE_CONFIG = """
flags:
netperf_benchmarks: TCP_RR
netperf_test_length: 30
netperf_max_iter: 3
netperf:
flags:
netperf_benchmarks: UDP_RR
netperf_test_length: 40
flag_matrix: test_matrix
flag_matrix_defs:
test_matrix:
netperf_benchmarks: [TCP_STREAM]
"""
class BenchmarkSetsTestCase(unittest.TestCase):
def setUp(self):
# create set of valid benchmark names from the benchmark directory
self.valid_benchmark_names = set()
for benchmark_module in linux_benchmarks.BENCHMARKS:
self.valid_benchmark_names.add(benchmark_module.BENCHMARK_NAME)
self.valid_benchmark_set_names = set()
# include the benchmark_set names since these can also appear
# as a valid name. At runtime they get expanded.
for benchmark_set_name in benchmark_sets.BENCHMARK_SETS:
self.valid_benchmark_set_names.add(benchmark_set_name)
# Mock flags to simulate setting --benchmarks.
p = mock.patch(benchmark_sets.__name__ + '.FLAGS')
self.mock_flags = p.start()
self.addCleanup(p.stop)
self.addCleanup(configs.GetConfigFlags.cache_clear)
self.mock_flags.flag_matrix = None
self.mock_flags.flag_zip = None
self.mock_flags.num_benchmark_copies = 1
def testStandardSet(self):
self.assertIn(benchmark_sets.STANDARD_SET, benchmark_sets.BENCHMARK_SETS)
standard_set = (benchmark_sets.BENCHMARK_SETS[
benchmark_sets.STANDARD_SET])[benchmark_sets.BENCHMARK_LIST]
self.assertIn('iperf', standard_set)
self.assertIn('fio', standard_set)
def testBenchmarkSetsHaveValidNames(self):
# check all the benchmark sets to make sure they contain valid names
valid_benchmark_and_set_names = (self.valid_benchmark_names |
self.valid_benchmark_set_names)
benchmark_set_items = list(benchmark_sets.BENCHMARK_SETS.items())
for _, key_value in benchmark_set_items:
benchmark_def_list = key_value[benchmark_sets.BENCHMARK_LIST]
for benchmark_name in benchmark_def_list:
self.assertIn(benchmark_name, valid_benchmark_and_set_names)
def testBenchmarkDerivedSets(self):
# make sure that sets which are derived from the standard_set
# expand into a valid set of benchmarks
with mock.patch.dict(
benchmark_sets.BENCHMARK_SETS, {
'test_derived_set': {
benchmark_sets.MESSAGE: 'test derived benchmark set.',
benchmark_sets.BENCHMARK_LIST: [benchmark_sets.STANDARD_SET]
}
}):
self.mock_flags.benchmarks = ['test_derived_set']
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertIsNotNone(benchmark_tuple_list)
self.assertGreater(len(benchmark_tuple_list), 0)
for benchmark_tuple in benchmark_tuple_list:
self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
self.valid_benchmark_names)
def testBenchmarkNestedDerivedSets(self):
# make sure that sets which are derived from the standard_set
# expand into a valid set of benchmarks
self.mock_flags.benchmarks = [benchmark_sets.STANDARD_SET]
standard_module_list = benchmark_sets.GetBenchmarksFromFlags()
with mock.patch.dict(
benchmark_sets.BENCHMARK_SETS, {
'test_derived_set': {
benchmark_sets.MESSAGE: 'test derived benchmark set.',
benchmark_sets.BENCHMARK_LIST: [benchmark_sets.STANDARD_SET]
},
'test_nested_derived_set': {
benchmark_sets.MESSAGE: 'test nested derived benchmark set.',
benchmark_sets.BENCHMARK_LIST: ['test_derived_set']
}
}):
# TODO(voellm): better check would be to make sure both lists are the same
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertIsNotNone(benchmark_tuple_list)
self.assertIsNotNone(standard_module_list)
self.assertEqual(len(benchmark_tuple_list), len(standard_module_list))
for benchmark_tuple in benchmark_tuple_list:
self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
self.valid_benchmark_names)
def testBenchmarkValidCommandLine1(self):
# make sure the standard_set expands to a valid set of benchmarks
self.mock_flags.benchmarks = ['standard_set']
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertIsNotNone(benchmark_tuple_list)
self.assertGreater(len(benchmark_tuple_list), 0)
for benchmark_tuple in benchmark_tuple_list:
self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
self.valid_benchmark_names)
@staticmethod
def _ContainsModule(module_name, module_list):
for module_tuple in module_list:
if module_tuple[0].BENCHMARK_NAME == module_name:
return True
return False
def testBenchmarkValidCommandLine2(self):
# make sure the standard_set plus a listed benchmark expands
# to a valid set of benchmarks
self.mock_flags.benchmarks = ['standard_set', 'bonnieplusplus']
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertIsNotNone(benchmark_tuple_list)
self.assertGreater(len(benchmark_tuple_list), 0)
for benchmark_tuple in benchmark_tuple_list:
self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
self.valid_benchmark_names)
# make sure bonnieplusplus is a listed benchmark
self.assertTrue(self._ContainsModule('bonnieplusplus',
benchmark_tuple_list))
def testBenchmarkValidCommandLine3(self):
# make sure the command with two benchmarks is processed correctly
self.mock_flags.benchmarks = ['iperf', 'fio']
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertIsNotNone(benchmark_tuple_list)
self.assertEqual(len(benchmark_tuple_list), 2)
for benchmark_tuple in benchmark_tuple_list:
self.assertIn(benchmark_tuple[0].BENCHMARK_NAME,
self.valid_benchmark_names)
# make sure listed benchmarks are present
self.assertTrue(self._ContainsModule('iperf', benchmark_tuple_list))
self.assertTrue(self._ContainsModule('fio', benchmark_tuple_list))
def testBenchmarkInvalidCommandLine1(self):
# make sure invalid benchmark names and sets cause a failure
self.mock_flags.benchmarks = ['standard_set_invalid_name']
self.assertRaises(ValueError, benchmark_sets.GetBenchmarksFromFlags)
def testBenchmarkInvalidCommandLine2(self):
# make sure invalid benchmark names and sets cause a failure
self.mock_flags.benchmarks = ['standard_set', 'iperf_invalid_name']
self.assertRaises(ValueError, benchmark_sets.GetBenchmarksFromFlags)
def testConfigNames(self):
self.mock_flags.benchmarks = ['internal_iprf', 'netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(USER_CONFIG)):
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertTrue(self._ContainsModule('iperf', benchmark_tuple_list))
self.assertTrue(self._ContainsModule('netperf', benchmark_tuple_list))
def testMatrices(self):
self.mock_flags.benchmarks = ['netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(MATRIX_CONFIG)):
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertEqual(len(benchmark_tuple_list), 4)
flag_list = [benchmark_tuple[1]['flags']
for benchmark_tuple in benchmark_tuple_list]
six.assertCountEqual(self, flag_list, EXPECTED_MATRIX_FLAGS)
def testZipWithDifferentAxesLengths(self):
self.mock_flags.benchmarks = ['netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(ZIP_CONFIG_DIFFERENT_AXES_LENGTH)):
self.assertRaises(ValueError, benchmark_sets.GetBenchmarksFromFlags)
def testZip(self):
self.mock_flags.benchmarks = ['netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(ZIP_CONFIG)):
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertEqual(len(benchmark_tuple_list), 2)
flag_list = [benchmark_tuple[1]['flags']
for benchmark_tuple in benchmark_tuple_list]
six.assertCountEqual(self, flag_list, EXPECTED_ZIP_FLAGS)
def testZipSingleAxis(self):
self.mock_flags.benchmarks = ['netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(SINGLE_ZIP_CONFIG)):
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertEqual(len(benchmark_tuple_list), 2)
flag_list = [benchmark_tuple[1]['flags']
for benchmark_tuple in benchmark_tuple_list]
six.assertCountEqual(self, flag_list, EXPECTED_SINGLE_ZIP_FLAGS)
def testZipAndMatrix(self):
self.mock_flags.benchmarks = ['netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(ZIP_AND_MATRIX_CONFIG)):
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertEqual(len(benchmark_tuple_list), 4)
flag_list = [benchmark_tuple[1]['flags']
for benchmark_tuple in benchmark_tuple_list]
six.assertCountEqual(self, flag_list, EXPECTED_ZIP_AND_MATRIX_FLAGS)
def testFilters(self):
self.mock_flags.benchmarks = ['netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(FILTER_CONFIG)):
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertEqual(len(benchmark_tuple_list), 1)
self.assertEqual(benchmark_tuple_list[0][1]['flags'],
{'zones': 'us-central1-a',
'machine_type': 'n1-standard-1'})
def testFlagPrecedence(self):
self.mock_flags.benchmarks = ['netperf']
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(FLAG_PRECEDENCE_CONFIG)):
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
self.assertEqual(len(benchmark_tuple_list), 1)
self.assertEqual(benchmark_tuple_list[0][1]['flags'],
{'netperf_benchmarks': 'TCP_STREAM',
'netperf_test_length': 40,
'netperf_max_iter': 3})
def testFlagMatrixNotFound(self):
self.mock_flags.benchmarks = ['netperf']
self.mock_flags.flag_matrix = 'bad_flag_matrix_name'
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(USER_CONFIG)):
with self.assertRaises(benchmark_sets.FlagMatrixNotFoundException):
benchmark_sets.GetBenchmarksFromFlags()
def testFlagZipNotFound(self):
self.mock_flags.benchmarks = ['netperf']
self.mock_flags.flag_zip = 'bad_flag_zip_name'
with mock.patch(
'perfkitbenchmarker.configs.GetUserConfig',
return_value=yaml.safe_load(USER_CONFIG)):
with self.assertRaises(benchmark_sets.FlagZipNotFoundException):
benchmark_sets.GetBenchmarksFromFlags()
if __name__ == '__main__':
unittest.main()
|
from absl import flags
from perfkitbenchmarker.linux_packages import nvidia_driver
FLAGS = flags.FLAGS
flags.DEFINE_string('torch_version', '1.7.1', 'The torch version.')
flags.DEFINE_string('torchvision_version', '0.8.2', 'The torchvision version.')
flags.DEFINE_string('torchaudio_version', '0.7.2', 'The torchaudio version.')
flags.DEFINE_string('torch_env', 'PATH=/opt/conda/bin:$PATH',
'The torch install environment.')
_PYTORCH_WHL = 'https://download.pytorch.org/whl/torch_stable.html'
def Install(vm):
"""Installs PyTorch on the VM."""
vm.InstallPackages('python3-pip')
toolkit = 'cpu'
if nvidia_driver.CheckNvidiaGpuExists(vm):
# Translates --cuda_toolkit_version=10.2 to "cu102" for the toolkit to
# install
toolkit = f'cu{"".join(FLAGS.cuda_toolkit_version.split("."))}'
vm.RemoteCommand(
f'{FLAGS.torch_env} python3 -m pip install '
f'torch=={FLAGS.torch_version}+{toolkit} '
f'torchvision=={FLAGS.torchvision_version}+{toolkit} '
f'torchaudio=={FLAGS.torchaudio_version} '
f'-f {_PYTORCH_WHL}')
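# For example, with --cuda_toolkit_version=10.2 and the default flag values
# above, the remote command resolves roughly to:
#
#   python3 -m pip install torch==1.7.1+cu102 torchvision==0.8.2+cu102 \
#       torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html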
def Uninstall(vm):
"""Uninstalls TensorFlow on the VM."""
vm.RemoteCommand(f'{FLAGS.torch_env} pip uninstall '
'torch torchvision torchaudio')
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# EC2 provides unique random hostnames.
def test_hostname(host):
pass
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
f = host.file(filename)
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
import re
import os.path
import functools
from PyQt5.QtCore import pyqtSlot, Qt, QUrl, QObject
from PyQt5.QtWebEngineWidgets import QWebEngineDownloadItem
from qutebrowser.browser import downloads, pdfjs
from qutebrowser.utils import debug, usertypes, message, log, objreg
class DownloadItem(downloads.AbstractDownloadItem):
"""A wrapper over a QWebEngineDownloadItem.
Attributes:
_qt_item: The wrapped item.
"""
def __init__(self, qt_item: QWebEngineDownloadItem,
manager: downloads.AbstractDownloadManager,
parent: QObject = None) -> None:
super().__init__(manager=manager, parent=manager)
self._qt_item = qt_item
qt_item.downloadProgress.connect( # type: ignore[attr-defined]
self.stats.on_download_progress)
qt_item.stateChanged.connect( # type: ignore[attr-defined]
self._on_state_changed)
# Ensure wrapped qt_item is deleted manually when the wrapper object
# is deleted. See https://github.com/qutebrowser/qutebrowser/issues/3373
self.destroyed.connect(self._qt_item.deleteLater)
def _is_page_download(self):
"""Check if this item is a page (i.e. mhtml) download."""
return (self._qt_item.savePageFormat() !=
QWebEngineDownloadItem.UnknownSaveFormat)
@pyqtSlot(QWebEngineDownloadItem.DownloadState)
def _on_state_changed(self, state):
state_name = debug.qenum_key(QWebEngineDownloadItem, state)
log.downloads.debug("State for {!r} changed to {}".format(
self, state_name))
if state == QWebEngineDownloadItem.DownloadRequested:
pass
elif state == QWebEngineDownloadItem.DownloadInProgress:
pass
elif state == QWebEngineDownloadItem.DownloadCompleted:
log.downloads.debug("Download {} finished".format(self.basename))
if self._is_page_download():
# Same logging as QtWebKit mhtml downloads.
log.downloads.debug("File successfully written.")
self.successful = True
self.done = True
self.finished.emit()
self.stats.finish()
elif state == QWebEngineDownloadItem.DownloadCancelled:
self.successful = False
self.done = True
self.cancelled.emit()
self.stats.finish()
elif state == QWebEngineDownloadItem.DownloadInterrupted:
self.successful = False
reason = self._qt_item.interruptReasonString()
self._die(reason)
else:
raise ValueError("_on_state_changed was called with unknown state "
"{}".format(state_name))
def _do_die(self):
progress_signal = self._qt_item.downloadProgress
progress_signal.disconnect() # type: ignore[attr-defined]
if self._qt_item.state() != QWebEngineDownloadItem.DownloadInterrupted:
self._qt_item.cancel()
def _do_cancel(self):
state = self._qt_item.state()
state_name = debug.qenum_key(QWebEngineDownloadItem, state)
assert state not in [QWebEngineDownloadItem.DownloadCompleted,
QWebEngineDownloadItem.DownloadCancelled], state_name
self._qt_item.cancel()
def retry(self):
state = self._qt_item.state()
if state != QWebEngineDownloadItem.DownloadInterrupted:
log.downloads.warning(
"Refusing to retry download in state {}".format(
debug.qenum_key(QWebEngineDownloadItem, state)))
return
self._qt_item.resume()
def _get_open_filename(self):
return self._filename
def url(self) -> QUrl:
return self._qt_item.url()
def _set_fileobj(self, fileobj, *, autoclose=True):
raise downloads.UnsupportedOperationError
def _set_tempfile(self, fileobj):
fileobj.close()
self._set_filename(fileobj.name, force_overwrite=True,
remember_directory=False)
def _ensure_can_set_filename(self, filename):
state = self._qt_item.state()
if state != QWebEngineDownloadItem.DownloadRequested:
state_name = debug.qenum_key(QWebEngineDownloadItem, state)
raise ValueError("Trying to set filename {} on {!r} which is "
"state {} (not in requested state)!".format(
filename, self, state_name))
def _ask_confirm_question(self, title, msg, *, custom_yes_action=None):
yes_action = custom_yes_action or self._after_set_filename
no_action = functools.partial(self.cancel, remove_data=False)
question = usertypes.Question()
question.title = title
question.text = msg
question.url = 'file://{}'.format(self._filename)
question.mode = usertypes.PromptMode.yesno
question.answered_yes.connect(yes_action)
question.answered_no.connect(no_action)
question.cancelled.connect(no_action)
self.cancelled.connect(question.abort)
self.error.connect(question.abort)
message.global_bridge.ask(question, blocking=True)
def _ask_create_parent_question(self, title, msg,
force_overwrite, remember_directory):
assert self._filename is not None
no_action = functools.partial(self.cancel, remove_data=False)
question = usertypes.Question()
question.title = title
question.text = msg
question.url = 'file://{}'.format(os.path.dirname(self._filename))
question.mode = usertypes.PromptMode.yesno
question.answered_yes.connect(lambda:
self._after_create_parent_question(
force_overwrite, remember_directory))
question.answered_no.connect(no_action)
question.cancelled.connect(no_action)
self.cancelled.connect(question.abort)
self.error.connect(question.abort)
message.global_bridge.ask(question, blocking=True)
def _after_set_filename(self):
assert self._filename is not None
dirname, basename = os.path.split(self._filename)
try:
# Qt 5.14
self._qt_item.setDownloadDirectory(dirname)
self._qt_item.setDownloadFileName(basename)
except AttributeError:
self._qt_item.setPath(self._filename)
self._qt_item.accept()
def _get_conflicting_download(self):
"""Return another potential active download with the same name.
webenginedownloads.DownloadItem needs to look for downloads both in its
manager and in qtnetwork-download-manager as both are used
simultaneously.
This method can be safely removed once #2328 is fixed.
"""
conflicting_download = super()._get_conflicting_download()
if conflicting_download:
return conflicting_download
qtnetwork_download_manager = objreg.get(
'qtnetwork-download-manager')
for download in qtnetwork_download_manager.downloads:
if self._conflicts_with(download):
return download
return None
def _get_suggested_filename(path):
"""Convert a path we got from chromium to a suggested filename.
Chromium thinks we want to download stuff to ~/Download, so even if we
don't, we get downloads with a suffix like (1) for files existing there.
We simply strip the suffix off via regex.
See https://bugreports.qt.io/browse/QTBUG-56978
"""
filename = os.path.basename(path)
suffix_re = re.compile(r"""
\ ? # Optional space between filename and suffix
(
# Numerical suffix
\([0-9]+\)
|
# ISO-8601 suffix
# https://cs.chromium.org/chromium/src/base/time/time_to_iso8601.cc
\ -\ \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z
)
        (?=\.|$) # Start of the extension, or end of a filename without one
""", re.VERBOSE)
return suffix_re.sub('', filename)
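# A sketch of the intended behaviour (hypothetical file names, not taken from
# an actual download):
#   "report (1).pdf"                      -> "report.pdf"
#   "log - 2020-02-14T12:34:56.789Z.txt"  -> "log.txt"
#   "archive.tar.gz"                      -> "archive.tar.gz"  (unchanged)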
class DownloadManager(downloads.AbstractDownloadManager):
"""Manager for currently running downloads.
Attributes:
_mhtml_target: DownloadTarget for the next MHTML download.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._mhtml_target = None
def install(self, profile):
"""Set up the download manager on a QWebEngineProfile."""
profile.downloadRequested.connect(self.handle_download,
Qt.DirectConnection)
@pyqtSlot(QWebEngineDownloadItem)
def handle_download(self, qt_item):
"""Start a download coming from a QWebEngineProfile."""
suggested_filename = _get_suggested_filename(qt_item.path())
use_pdfjs = pdfjs.should_use_pdfjs(qt_item.mimeType(), qt_item.url())
download = DownloadItem(qt_item, manager=self)
self._init_item(download, auto_remove=use_pdfjs,
suggested_filename=suggested_filename)
if self._mhtml_target is not None:
download.set_target(self._mhtml_target)
self._mhtml_target = None
return
if use_pdfjs:
download.set_target(downloads.PDFJSDownloadTarget())
return
filename = downloads.immediate_download_path()
if filename is not None:
# User doesn't want to be asked, so just use the download_dir
target = downloads.FileDownloadTarget(filename)
download.set_target(target)
return
# Ask the user for a filename - needs to be blocking!
question = downloads.get_filename_question(
suggested_filename=suggested_filename, url=qt_item.url(),
parent=self)
self._init_filename_question(question, download)
message.global_bridge.ask(question, blocking=True)
# The filename is set via the question.answered signal, connected in
# _init_filename_question.
def get_mhtml(self, tab, target):
"""Download the given tab as mhtml to the given target."""
assert tab.backend == usertypes.Backend.QtWebEngine
assert self._mhtml_target is None, self._mhtml_target
self._mhtml_target = target
tab.action.save_page()
|
from unittest import TestCase
import numpy as np
import pandas as pd
from scattertext import whitespace_nlp
from scattertext.TermDocMatrixFromPandas import TermDocMatrixWithoutCategoriesFromPandas, TermDocMatrixFromPandas
from scattertext.TermDocMatrixWithoutCategories import TermDocMatrixWithoutCategories
from scattertext.test.test_corpusFromPandas import get_docs_categories
class CorpusFromPandasWithoutCategories():
pass
def get_term_doc_matrix_without_categories():
categories, documents = get_docs_categories()
df = pd.DataFrame({'text': documents})
tdm = TermDocMatrixWithoutCategoriesFromPandas(df, 'text', nlp=whitespace_nlp).build()
return tdm
class TestCorpusFromPandasWithoutCategories(TestCase):
def test_term_category_matrix_from_pandas_without_categories(self):
tdm = get_term_doc_matrix_without_categories()
categories, documents = get_docs_categories()
reg_tdm = TermDocMatrixFromPandas(pd.DataFrame({'text': documents, 'categories': categories}),
text_col='text',
category_col='categories',
nlp=whitespace_nlp).build()
self.assertIsInstance(tdm, TermDocMatrixWithoutCategories)
self.assertEqual(tdm.get_terms(), reg_tdm.get_terms())
self.assertEqual(tdm.get_num_docs(), reg_tdm.get_num_docs())
np.testing.assert_equal(tdm.get_term_doc_mat().data, reg_tdm.get_term_doc_mat().data)
|
from ...utils import verbose
from ..utils import _data_path, _data_path_doc
@verbose
def data_path(path=None, force_update=False, update_path=True,
download=True, verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='misc',
download=download)
data_path.__doc__ = _data_path_doc.format(name='misc',
conf='MNE_DATASETS_MISC_PATH')
|
import threading
import imp
import os
from stash.system import shthreads
def get_stash():
"""
returns the currently active StaSh-instance.
returns None if it can not be found.
This is useful for modules.
"""
if "_stash" in globals():
return globals()["_stash"]
for thr in threading.enumerate():
if isinstance(thr, shthreads.ShBaseThread):
ct = thr
while not ct.is_top_level():
ct = ct.parent
return ct.parent.stash
return None
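# Minimal usage sketch from a StaSh-aware module (assumes StaSh is running;
# outside of StaSh this simply yields None):
#     _stash = get_stash()
#     if _stash is None:
#         raise RuntimeError("StaSh is not running")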
def load_from_dir(dirpath, varname):
"""
    returns a list of all variables named 'varname' found in .py files in the directory 'dirpath'.
"""
if not os.path.isdir(dirpath):
return []
ret = []
for fn in os.listdir(dirpath):
fp = os.path.join(dirpath, fn)
if not os.path.isfile(fp):
continue
with open(fp, "r") as fin:
mod = imp.load_source(fn[:fn.index(".")], fp, fin)
if not hasattr(mod, varname):
continue
else:
ret.append(getattr(mod, varname))
return ret
|
import numpy as np
import warnings
import chainer
from chainer.backends import cuda
from chainercv.transforms import center_crop
from chainercv.transforms import resize
from chainercv.transforms import scale
from chainercv.transforms import ten_crop
class FeaturePredictor(chainer.Chain):
"""Wrapper that adds a prediction method to a feature extraction link.
The :meth:`predict` takes three steps to make a prediction.
1. Preprocess input images
2. Forward the preprocessed images to the network
    3. Average the features when more than one crop is extracted per image.
Example:
>>> from chainercv.links import VGG16
>>> from chainercv.links import FeaturePredictor
>>> base_model = VGG16()
>>> model = FeaturePredictor(base_model, 224, 256)
>>> prob = model.predict([img])
# Predicting multiple features
>>> model.extractor.pick = ['conv5_3', 'fc7']
>>> conv5_3, fc7 = model.predict([img])
When :obj:`self.crop == 'center'`, :meth:`predict` extracts features from
the center crop of the input images.
When :obj:`self.crop == '10'`, :meth:`predict` extracts features from
patches that are ten-cropped from the input images.
    When more than one crop is extracted from an image, :meth:`predict`
    returns the average of the features computed from the crops.
Args:
extractor: A feature extraction link. This is a callable chain
that takes a batch of images and returns a variable or a
tuple of variables.
crop_size (int or tuple): The height and the width of an image after
cropping in preprocessing.
If this is an integer, the image is cropped to
:math:`(crop\_size, crop\_size)`.
scale_size (int or tuple): If :obj:`scale_size` is :obj:`None`,
neither scaling nor resizing is conducted during preprocessing.
This is the default behavior.
If this is an integer, an image is resized so that the length of
the shorter edge is equal to :obj:`scale_size`. If this is a tuple
:obj:`(height, width)`, the image is resized to
:math:`(height, width)`.
crop ({'center', '10'}): Determines the style of cropping.
mean (numpy.ndarray): A mean value. If this is :obj:`None`,
:obj:`extractor.mean` is used as the mean value.
"""
def __init__(self, extractor,
crop_size, scale_size=None,
crop='center', mean=None):
super(FeaturePredictor, self).__init__()
self.scale_size = scale_size
if isinstance(crop_size, int):
crop_size = (crop_size, crop_size)
self.crop_size = crop_size
self.crop = crop
with self.init_scope():
self.extractor = extractor
if mean is None:
self.mean = self.extractor.mean
else:
self.mean = mean
def _prepare(self, img):
"""Prepare an image for feeding it to a model.
This is a standard preprocessing scheme used by feature extraction
models.
First, the image is scaled or resized according to :math:`scale_size`.
Note that this step is optional.
Next, the image is cropped to :math:`crop_size`.
Last, the image is mean subtracted by an array :obj:`mean`.
Args:
img (~numpy.ndarray): An image. This is in CHW format.
The range of its value is :math:`[0, 255]`.
Returns:
~numpy.ndarray:
            A preprocessed image. This is a 4D array whose batch size is
the number of crops.
"""
if self.scale_size is not None:
if isinstance(self.scale_size, int):
img = scale(img, size=self.scale_size)
else:
img = resize(img, size=self.scale_size)
else:
img = img.copy()
if self.crop == '10':
imgs = ten_crop(img, self.crop_size)
elif self.crop == 'center':
imgs = center_crop(img, self.crop_size)[np.newaxis]
imgs -= self.mean[np.newaxis]
return imgs
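    # Shape of the array returned by _prepare (a sketch, assuming a CHW input
    # image and a tuple crop_size):
    #   crop == 'center' -> (1, C, crop_size[0], crop_size[1])
    #   crop == '10'     -> (10, C, crop_size[0], crop_size[1])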
def _average_crops(self, y, n_crop):
if y.ndim == 4:
warnings.warn(
'Four dimensional features are averaged. '
'If these are batch of 2D spatial features, '
'their spatial information would be lost.')
xp = chainer.backends.cuda.get_array_module(y)
y = y.reshape((-1, n_crop) + y.shape[1:])
y = xp.mean(y, axis=1)
return y
def predict(self, imgs):
"""Predict features from images.
Given :math:`N` input images, this method outputs a batched array with
batchsize :math:`N`.
Args:
imgs (iterable of numpy.ndarray): Array-images.
All images are in CHW format
and the range of their value is :math:`[0, 255]`.
Returns:
numpy.ndarray or tuple of numpy.ndarray:
A batch of features or a tuple of them.
"""
# [(C, H_0, W_0), ..., (C, H_{B-1}, W_{B-1})] -> (B, N, C, H, W)
imgs = self.xp.asarray([self._prepare(img) for img in imgs])
n_crop = imgs.shape[-4]
shape = (-1, imgs.shape[-3]) + self.crop_size
# (B, N, C, H, W) -> (B * N, C, H, W)
imgs = imgs.reshape(shape)
with chainer.using_config('train', False), \
chainer.function.no_backprop_mode():
imgs = chainer.Variable(imgs)
features = self.extractor(imgs)
if isinstance(features, tuple):
output = []
for feature in features:
feature = feature.array
if n_crop > 1:
feature = self._average_crops(feature, n_crop)
output.append(cuda.to_cpu(feature))
output = tuple(output)
else:
output = cuda.to_cpu(features.array)
if n_crop > 1:
output = self._average_crops(output, n_crop)
return output
|
import argparse
import imp
import yaml
from yaml.scanner import ScannerError
TYPE = 'type'
LIST = 'list'
DESCRIPTION = 'description'
REQUIRED = 'required'
DEFAULT = 'default'
ALLOWED = 'allowed'
VALUES_DSC = 'values_description'
ONE_OF = 'one of'
SCHEMA = 'schema'
EXAMPLES = 'examples'
ANYOF = 'anyof'
NO_DSC = '(no description)'
VALIDATOR = 'validator'
NoneType = type(None)
class TextBlock(object):
def __init__(self, text, tab_replacement=' ', ending=''):
"""
:type text: str
"""
self.text = str(text).replace('\t', tab_replacement)
self.lines = self.text.splitlines()
self.width = max([len(line) for line in self.lines] + [0])
self.padded_width = self.width + 2
self.height = len(self.lines)
def get_line(self, item, raise_index_error=False, default=''):
try:
return self.lines[item]
except IndexError:
if raise_index_error:
raise
else:
return default
def get_line_justified(self, item, fillchar=' ', raise_index_error=False, default=''):
return self.get_line(item, raise_index_error, default).ljust(self.width, fillchar)
def __str__(self):
return self.text
def to_text_block(method):
def decorated(content):
if not isinstance(content, TextBlock):
return method(TextBlock(content))
else:
            return method(content)
return decorated
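# With this decorator the renderer helpers below accept either plain strings
# or TextBlock instances; e.g. RSTRenderer.mono('x') and
# RSTRenderer.mono(TextBlock('x')) are both expected to yield '``x``'
# (a sketch of the intent, not an exhaustive contract).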
class RSTRenderer(object):
def with_escape(method):
def escaped(content):
return method(RSTRenderer.escape(content))
return escaped
@staticmethod
def any_of_table(blocks):
"""
:type blocks: list of TextBlock
"""
HEADER = 'any of'
cnt = len(blocks)
        # no table needed for a single content block
if cnt < 2:
return blocks[0] if blocks else ''
# width = widths of contents + separators
width = max((len(HEADER), sum([c.padded_width for c in blocks]))) + (cnt + 1)
height = max([c.height for c in blocks])
# rows separators
top_bar = '+{}+'.format('-' * (width - 2))
header_bar = '+{}+'.format('+'.join(['=' * c.padded_width for c in blocks]))
bottom_bar = '+{}+'.format('+'.join(['-' * c.padded_width for c in blocks]))
header = '|{}|'.format(HEADER.center(width - 2))
body = '\n'.join(
['| {} |'.format(' | '.join([c.get_line_justified(i) for c in blocks])) for i in range(height)])
return '\n'.join([top_bar,
header,
header_bar,
body,
bottom_bar])
@staticmethod
def preserve_indents(block):
"""
:type block: TextBlock
"""
return '\n'.join(['| {}'.format(line) for line in block.lines])
@staticmethod
def bold(content):
"""
:type content: str
:return: str
"""
return '\n'.join(['**{}**'.format(line) for line in content.splitlines()])
@staticmethod
def title(content, new_line_replacement=' ', tab_replacement=' '):
"""
Underlines content with '='. New lines and tabs will be replaced
:param str content:
:param str new_line_replacement:
:param str tab_replacement:
:return: str
"""
prepared_content = content.strip().replace('\n', new_line_replacement).replace('\t', tab_replacement)
return '{}\n{}'.format(prepared_content, '=' * len(prepared_content))
@staticmethod
def subtitle(content, new_line_replacement=' ', tab_replacement=' '):
prepared_content = content.strip().replace('\n', new_line_replacement).replace('\t', tab_replacement)
return '{}\n{}'.format(prepared_content, '-' * len(prepared_content))
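    # A sketch of the produced reStructuredText: title('Tags') returns
    # 'Tags\n====' (the text underlined with a row of '=' of equal length),
    # while subtitle() underlines with '-'; newlines and tabs in the content
    # are replaced before underlining.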
@staticmethod
@with_escape
@to_text_block
def italic(block):
"""
:type block: TextBlock
"""
return '\n'.join(['*{}*'.format(line) for line in block.lines])
@staticmethod
@to_text_block
def mono(block):
"""
:type block: TextBlock
"""
return '\n'.join(['``{}``'.format(line) for line in block.lines])
@classmethod
def bullet_list(cls, blocks):
"""
:type blocks: list of TextBlock
:rtype: TextBlock
"""
return TextBlock('\n'.join([cls._list_item(block) for block in blocks]))
@staticmethod
def _list_item(block):
"""
:type block: TextBlock
"""
return '- ' + '\n '.join(block.lines)
@staticmethod
def def_list(items, sort=True, newlines=True):
def format_value(value):
if isinstance(value, (int, bool, NoneType)):
return format_value(str(value))
if isinstance(value, str):
return '\n '.join(value.splitlines())
elif isinstance(value, TextBlock):
return '\n '.join(value.lines)
elif isinstance(value, dict):
return '\n '.join(RSTRenderer.def_list(value, sort, newlines).splitlines())
elif isinstance(value, list):
return '\n '.join(RSTRenderer.bullet_list([TextBlock(item) for item in value]).lines)
else:
raise ValueError('Unsupported value type: {}\n{}'.format(type(value), value))
sort = sorted if sort else lambda x: x
template = '{}\n {}' if newlines else ':{}: {}'
return '\n' + '\n'.join([template.format(k.replace('\n', ' '),
format_value(v).strip())
for k, v in sort(items.items())]) if items else ''
@staticmethod
def field_list(items, sort=True, newlines=True):
"""
:param bool newlines: add newlines between names and values
:param bool sort: sort items alphabetically by key
:type items: dict
:rtype: TextBlock
"""
def format_value(value):
if isinstance(value, (int, bool, NoneType)):
return format_value(str(value))
if isinstance(value, str):
return '\n '.join(value.splitlines())
elif isinstance(value, TextBlock):
return '\n '.join(value.lines)
elif isinstance(value, dict):
return '\n '.join(RSTRenderer.field_list(value, sort, newlines).splitlines())
elif isinstance(value, list):
return '\n '.join(RSTRenderer.bullet_list([TextBlock(item) for item in value]).lines)
else:
raise ValueError('Unsupported value type: {}\n{}'.format(type(value), value))
sort = sorted if sort else lambda x: x
template = ':{}:\n {}' if newlines else ':{}: {}'
return '\n' + '\n'.join([template.format(k.replace('\n', ' '),
format_value(v).strip())
for k, v in sort(items.items())]) if items else ''
@staticmethod
def dict_list_structure(items, sort_dict=True):
if isinstance(items, str):
return TextBlock(items)
elif isinstance(items, int):
return TextBlock(str(items))
elif isinstance(items, list):
return RSTRenderer.bullet_list([RSTRenderer.dict_list_structure(item) for item in items])
elif isinstance(items, dict):
return RSTRenderer.field_list({k: RSTRenderer.dict_list_structure(v) for k, v in items.items()}, sort_dict)
@staticmethod
def escape(content):
"""
:type content: str
"""
return content.replace('-', r'\-')
del with_escape
def render_body(renderer, option_kwargs, exclude_keys, special_keys=None):
"""
:type option_kwargs: dict
:type exclude_keys: list
:type special_keys: dict
"""
common_formatters = {
EXAMPLES: lambda examples: renderer.def_list({renderer.mono(example): annotation for example, annotation in examples.items()})
}
def default_fmt(x):
return x
special_keys = special_keys or {}
special_part = '\n'.join([special_handler(renderer, option_kwargs[special_key])
for special_key, special_handler in special_keys.items()
if special_key in option_kwargs])
uncommon_keys = set(exclude_keys) | set(special_keys.keys())
common_part = renderer.field_list({
k: common_formatters.get(k, default_fmt)(v)
for k, v in option_kwargs.items()
if k not in uncommon_keys
})
return '\n'.join([_ for _ in [common_part, special_part] if _])
def render_values_description(renderer, option_kwargs):
values_description_dict = {
value: option_kwargs[VALUES_DSC].get(value, '') for value in option_kwargs[ALLOWED]
} \
if ALLOWED in option_kwargs \
else \
option_kwargs[VALUES_DSC]
values_description = renderer.field_list(
{renderer.mono(value): dsc for value, dsc in values_description_dict.items()},
newlines=False
)
return renderer.field_list({ONE_OF: values_description})
def allowed(renderer, values):
return renderer.field_list({ONE_OF: '[{}]'.format(', '.join([renderer.mono(value) for value in values]))},
newlines=False)
class OptionFormatter(object):
def __init__(self, option_schema):
"""
:type option_schema: dict
"""
self.option_name, self.option_kwargs = next(iter(option_schema.items()))
# print(option_name, option_kwargs)
self.formatter = self.__guess_formatter()
def format_dsc(self, renderer):
dsc = self.option_kwargs.get(DESCRIPTION, NO_DSC).strip('. ')
if DEFAULT in self.option_kwargs:
default_value = self.option_kwargs.get(DEFAULT)
if default_value == '':
default_value = '""'
return ' '.join([renderer.italic('- {}. Default:'.format(dsc)),
renderer.mono(default_value)])
elif REQUIRED in self.option_kwargs:
return renderer.italic('- {}.'.format(dsc)) +\
' ' +\
renderer.bold('Required.')
else:
return renderer.italic('- {}.'.format(dsc))
def scalar_formatter(self, renderer, header=True):
hdr = renderer.subtitle(renderer.mono(self.option_name) + ' ' + '({})'.format(self.option_kwargs.get(TYPE))) \
if header else ''
dsc = self.format_dsc(renderer)
body = render_body(renderer, self.option_kwargs, [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED], {'allowed': allowed})
return '\n'.join([_ for _ in [hdr, dsc, body] if _])
def scalar_with_values_description(self, renderer, header=True):
hdr = renderer.subtitle(renderer.mono(self.option_name) + ' ' + '({})'.format(self.option_kwargs.get(TYPE))) \
if header else ''
dsc = self.format_dsc(renderer)
body = render_body(renderer, self.option_kwargs, [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED, ALLOWED, VALUES_DSC])
values_description_block = render_values_description(renderer, self.option_kwargs)
return '\n'.join([_ for _ in [hdr, dsc, body, values_description_block] if _])
def dict_formatter(self, renderer, header=True):
hdr = renderer.subtitle(renderer.mono(self.option_name) + ' ' + '({})'.format(self.option_kwargs.get(TYPE))) \
if header else ''
dsc = self.format_dsc(renderer)
dict_schema = self.option_kwargs[SCHEMA]
schema_block = renderer.field_list({
'{} ({})'.format(renderer.mono(key), dict_schema[key].get(TYPE, 'anyof')): get_formatter({key: value})(renderer, header=False)
for key, value in dict_schema.items()})
body = render_body(renderer, self.option_kwargs, [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED, SCHEMA])
return '\n'.join([_ for _ in [hdr, dsc, schema_block, body] if _])
def anyof_formatter(self, renderer, header=True):
types = [case[TYPE] for case in self.option_kwargs[ANYOF] if TYPE in case]
hdr = renderer.subtitle(renderer.mono(self.option_name) + ' ' + '({})'.format(' or '.join(types))) \
if header else ''
dsc = self.format_dsc(renderer)
values_description_block = render_values_description(renderer, self.option_kwargs) \
if VALUES_DSC in self.option_kwargs else ''
body = render_body(renderer, self.option_kwargs, [VALIDATOR, TYPE, DESCRIPTION, DEFAULT, REQUIRED, ANYOF, VALUES_DSC])
return '\n'.join([_ for _ in [hdr, dsc, values_description_block, body] if _])
def list_formatter(self, renderer, header=True):
schema = self.option_kwargs[SCHEMA]
hdr = renderer.subtitle(renderer.mono(self.option_name) + ' '
+ '({} of {})'.format(self.option_kwargs.get(TYPE, LIST), schema.get(TYPE, '')))
dsc = self.format_dsc(renderer)
body = render_body(renderer, self.option_kwargs, [VALIDATOR, TYPE, DEFAULT, REQUIRED, DESCRIPTION, SCHEMA])
if set(schema.keys()) - {TYPE}:
schema_block = renderer.field_list({
'[list_element] ({})'.format(schema.get(TYPE, '')):
get_formatter({'list_element': schema})(renderer, header=False)
})
return '\n'.join([_ for _ in [hdr, dsc, schema_block, body] if _])
else:
return '\n'.join([_ for _ in [hdr, dsc, body] if _])
def __guess_formatter(self):
if ANYOF in self.option_kwargs:
return self.anyof_formatter
elif SCHEMA in self.option_kwargs:
return self.list_formatter if self.option_kwargs.get(TYPE) == LIST else self.dict_formatter
elif VALUES_DSC in self.option_kwargs:
return self.scalar_with_values_description
else:
return self.scalar_formatter
def get_formatter(option_schema):
"""
:type option_schema: dict
"""
return OptionFormatter(option_schema).formatter
def format_option(option_schema, renderer):
return get_formatter(option_schema)(renderer)
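# A minimal usage sketch (hypothetical option schema, names invented for
# illustration):
#
#     entry = {'pool_size': {'type': 'integer',
#                            'default': 4,
#                            'description': 'worker pool size'}}
#     print(format_option(entry, RSTRenderer()))
#
# which renders roughly a "``pool_size`` (integer)" subtitle underlined with
# '-', followed by the italicised description and the ``4`` default value.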
def format_schema(schema, renderer, title=None):
"""
:param dict schema: Cerberus config schema
:type renderer: RSTRenderer
"""
body = '\n\n'.join(
sorted([format_option({option_name: option_schema}, renderer) for option_name, option_schema in schema.items()]))
if title:
title = renderer.title(title)
return title + '\n\n' + body
else:
return body
def main():
parser = argparse.ArgumentParser()
parser.add_argument('schema', help='Path to schema file')
parser.add_argument('-o', '--output_filename', default='output.rst', help='Name for the output rst document')
parser.add_argument('--title', default=None, help='Document title')
parser.add_argument('-a', '--append', action='store_true', help='Don\'t overwrite output file')
args = parser.parse_args()
schema_path = args.schema
output_filename = args.output_filename
title = args.title
append = args.append
try:
with open(schema_path) as f:
schema = yaml.load(f, Loader=yaml.FullLoader)
except ScannerError:
schema_module = imp.load_source('schema', schema_path)
schema = schema_module.OPTIONS
document = format_schema(schema, RSTRenderer(), title)
if append:
with open(output_filename, 'a') as f:
f.write('\n\n')
f.write(document)
else:
with open(output_filename, 'w') as f:
f.write(document)
if __name__ == '__main__':
main()
|
from homeassistant.components.ozw.fan import SPEED_TO_VALUE
from .common import setup_ozw
async def test_fan(hass, fan_data, fan_msg, sent_messages, caplog):
"""Test fan."""
receive_message = await setup_ozw(hass, fixture=fan_data)
# Test loaded
state = hass.states.get("fan.in_wall_smart_fan_control_level")
assert state is not None
assert state.state == "on"
# Test turning off
await hass.services.async_call(
"fan",
"turn_off",
{"entity_id": "fan.in_wall_smart_fan_control_level"},
blocking=True,
)
assert len(sent_messages) == 1
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 0, "ValueIDKey": 172589073}
# Feedback on state
fan_msg.decode()
fan_msg.payload["Value"] = 0
fan_msg.encode()
receive_message(fan_msg)
await hass.async_block_till_done()
state = hass.states.get("fan.in_wall_smart_fan_control_level")
assert state is not None
assert state.state == "off"
# Test turning on
new_speed = "medium"
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.in_wall_smart_fan_control_level", "speed": new_speed},
blocking=True,
)
assert len(sent_messages) == 2
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": SPEED_TO_VALUE[new_speed],
"ValueIDKey": 172589073,
}
# Feedback on state
fan_msg.decode()
fan_msg.payload["Value"] = SPEED_TO_VALUE[new_speed]
fan_msg.encode()
receive_message(fan_msg)
await hass.async_block_till_done()
state = hass.states.get("fan.in_wall_smart_fan_control_level")
assert state is not None
assert state.state == "on"
assert state.attributes["speed"] == new_speed
# Test turn on without speed
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.in_wall_smart_fan_control_level"},
blocking=True,
)
assert len(sent_messages) == 3
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": 255,
"ValueIDKey": 172589073,
}
# Feedback on state
fan_msg.decode()
fan_msg.payload["Value"] = SPEED_TO_VALUE[new_speed]
fan_msg.encode()
receive_message(fan_msg)
await hass.async_block_till_done()
state = hass.states.get("fan.in_wall_smart_fan_control_level")
assert state is not None
assert state.state == "on"
assert state.attributes["speed"] == new_speed
# Test set speed to off
new_speed = "off"
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.in_wall_smart_fan_control_level", "speed": new_speed},
blocking=True,
)
assert len(sent_messages) == 4
msg = sent_messages[-1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {
"Value": SPEED_TO_VALUE[new_speed],
"ValueIDKey": 172589073,
}
# Feedback on state
fan_msg.decode()
fan_msg.payload["Value"] = SPEED_TO_VALUE[new_speed]
fan_msg.encode()
receive_message(fan_msg)
await hass.async_block_till_done()
state = hass.states.get("fan.in_wall_smart_fan_control_level")
assert state is not None
assert state.state == "off"
# Test invalid speed
new_speed = "invalid"
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.in_wall_smart_fan_control_level", "speed": new_speed},
blocking=True,
)
assert len(sent_messages) == 4
assert "Invalid speed received: invalid" in caplog.text
|
from __future__ import print_function
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim, vmodl
import argparse
import atexit
import getpass
import sys
import ssl
def GetArgs():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(description='Process args for powering on a Virtual Machine')
parser.add_argument('-s', '--host', required=True, action='store', help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store', help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store', help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store', help='Password to use when connecting to host')
parser.add_argument('-v', '--vmname', required=True, action='append', help='Names of the Virtual Machines to power on')
args = parser.parse_args()
return args
def WaitForTasks(tasks, si):
"""
Given the service instance si and tasks, it returns after all the
tasks are complete
"""
pc = si.content.propertyCollector
taskList = [str(task) for task in tasks]
# Create filter
objSpecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
propSpec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
pathSet=[], all=True)
filterSpec = vmodl.query.PropertyCollector.FilterSpec()
filterSpec.objectSet = objSpecs
filterSpec.propSet = [propSpec]
filter = pc.CreateFilter(filterSpec, True)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(taskList):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in taskList:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
taskList.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if filter:
filter.Destroy()
# Start program
def main():
"""
Simple command-line program for powering on virtual machines on a system.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and user %s: ' % (args.host,args.user))
try:
vmnames = args.vmname
if not len(vmnames):
print("No virtual machine specified for poweron")
sys.exit()
context = None
if hasattr(ssl, '_create_unverified_context'):
context = ssl._create_unverified_context()
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port),
sslContext=context)
if not si:
print("Cannot connect to specified host using specified username and password")
sys.exit()
atexit.register(Disconnect, si)
        # Retrieve the list of Virtual Machines from the inventory objects
# under the rootFolder
content = si.content
objView = content.viewManager.CreateContainerView(content.rootFolder,
[vim.VirtualMachine],
True)
vmList = objView.view
objView.Destroy()
# Find the vm and power it on
tasks = [vm.PowerOn() for vm in vmList if vm.name in vmnames]
# Wait for power on to complete
WaitForTasks(tasks, si)
print("Virtual Machine(s) have been powered on successfully")
except vmodl.MethodFault as e:
print("Caught vmodl fault : " + e.msg)
except Exception as e:
print("Caught Exception : " + str(e))
# Start program
if __name__ == "__main__":
main()
|
from homeassistant.const import STATE_OFF, STATE_ON
from .util import async_init_integration
async def test_create_binary_sensors(hass):
"""Test creation of binary sensors."""
await async_init_integration(hass)
state = hass.states.get("binary_sensor.master_suite_blower_active")
assert state.state == STATE_ON
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Master Suite Blower Active",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("binary_sensor.downstairs_east_wing_blower_active")
assert state.state == STATE_OFF
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Downstairs East Wing Blower Active",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
|
import numpy as np
import pandas as pd
class PhraseSelector(object):
def __init__(self,
minimum_pmi=16):
'''
Filter n-grams using PMI.
Parameters
----------
        minimum_pmi : float
            Minimum pointwise mutual information (in bits) an n-gram must have
            in order to be kept. Defaults to 16.
'''
self.minimum_pmi = minimum_pmi
def compact(self, term_doc_matrix, non_text=False):
'''
        Parameters
        ----------
        term_doc_matrix : TermDocMatrix
        non_text : bool
            If True, operate on metadata (non-text) terms instead of text terms.

        Returns
        -------
        TermDocMatrix
            A new term-document matrix with low-PMI n-grams removed.
'''
count_df = self._get_statistics_dataframe(term_doc_matrix, non_text)
return term_doc_matrix.remove_terms(
count_df[count_df['pmi'] < self.minimum_pmi].index,
non_text=non_text
)
def _get_statistics_dataframe(self, term_doc_matrix, non_text):
tdf = (term_doc_matrix.get_metadata_freq_df().sum(axis=1)
if non_text
else term_doc_matrix.get_term_freq_df().sum(axis=1))
gram_df = pd.Series(tdf.index).apply(lambda x: pd.Series(list(reversed(x.split()))))
gram_df['c'] = tdf.values
gram_df['term'] = tdf.index
gram_df = gram_df.set_index('term')
unigram_df = gram_df[gram_df[1].isnull()][['c']]
ngram_df = gram_df.dropna()
count_df = pd.merge(pd.merge(ngram_df, unigram_df,
left_on=0, right_index=True, suffixes=('', '0')),
unigram_df, left_on=1, right_index=True, suffixes=('', '1'))
p0 = count_df['c0'] / unigram_df['c'].sum()
p1 = count_df['c1'] / unigram_df['c'].sum()
p = count_df['c'] / ngram_df['c'].sum()
count_df['pmi'] = np.log(p / (p0 * p1)) / np.log(2)
return count_df
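# The 'pmi' column computed above is standard pointwise mutual information in
# bits, with probabilities estimated from the corpus counts used above
# (a sketch of the formula, not an alternative implementation):
#
#     PMI(w1, w2) = log2( P(w1 w2) / (P(w1) * P(w2)) )
#
# Bigrams that co-occur far more often than their word frequencies would
# predict get a large positive PMI and survive the `minimum_pmi` cutoff.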
|
import os
import os.path as op
from numpy.testing import assert_array_equal
from mne.utils import requires_mayavi, run_tests_if_main, traits_test
@requires_mayavi
@traits_test
def test_mri_model(subjects_dir_tmp):
"""Test MRIHeadWithFiducialsModel Traits Model."""
from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
tgt_fname = op.join(subjects_dir_tmp, 'test-fiducials.fif')
# Remove the two files that will make the fiducials okay via MNI estimation
os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
'sample-fiducials.fif'))
os.remove(op.join(subjects_dir_tmp, 'sample', 'mri', 'transforms',
'talairach.xfm'))
model = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir_tmp)
model.subject = 'sample'
assert model.default_fid_fname[-20:] == "sample-fiducials.fif"
assert not model.can_reset
assert not model.can_save
model.lpa = [[-1, 0, 0]]
model.nasion = [[0, 1, 0]]
model.rpa = [[1, 0, 0]]
assert not model.can_reset
assert model.can_save
bem_fname = op.basename(model.bem_high_res.file)
assert not model.can_reset
assert bem_fname == 'sample-head-dense.fif'
model.save(tgt_fname)
assert model.fid_file == tgt_fname
# resetting the file should not affect the model's fiducials
model.fid_file = ''
assert_array_equal(model.lpa, [[-1, 0, 0]])
assert_array_equal(model.nasion, [[0, 1, 0]])
assert_array_equal(model.rpa, [[1, 0, 0]])
# reset model
model.lpa = [[0, 0, 0]]
model.nasion = [[0, 0, 0]]
model.rpa = [[0, 0, 0]]
assert_array_equal(model.lpa, [[0, 0, 0]])
assert_array_equal(model.nasion, [[0, 0, 0]])
assert_array_equal(model.rpa, [[0, 0, 0]])
# loading the file should assign the model's fiducials
model.fid_file = tgt_fname
assert_array_equal(model.lpa, [[-1, 0, 0]])
assert_array_equal(model.nasion, [[0, 1, 0]])
assert_array_equal(model.rpa, [[1, 0, 0]])
# after changing from file model should be able to reset
model.nasion = [[1, 1, 1]]
assert model.can_reset
model.reset = True
assert_array_equal(model.nasion, [[0, 1, 0]])
run_tests_if_main()
|
import logging
import voluptuous as vol
from homeassistant.components.mqtt import valid_publish_topic, valid_subscribe_topic
from homeassistant.const import CONF_OPTIMISTIC
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_DEVICES,
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAYS,
CONF_NODES,
CONF_PERSISTENCE,
CONF_PERSISTENCE_FILE,
CONF_RETAIN,
CONF_TCP_PORT,
CONF_TOPIC_IN_PREFIX,
CONF_TOPIC_OUT_PREFIX,
CONF_VERSION,
DOMAIN,
MYSENSORS_GATEWAYS,
)
from .device import get_mysensors_devices
from .gateway import finish_setup, get_mysensors_gateway, setup_gateways
_LOGGER = logging.getLogger(__name__)
CONF_DEBUG = "debug"
CONF_NODE_NAME = "name"
DEFAULT_BAUD_RATE = 115200
DEFAULT_TCP_PORT = 5003
DEFAULT_VERSION = "1.4"
def has_all_unique_files(value):
"""Validate that all persistence files are unique and set if any is set."""
persistence_files = [gateway.get(CONF_PERSISTENCE_FILE) for gateway in value]
if None in persistence_files and any(
name is not None for name in persistence_files
):
raise vol.Invalid(
"persistence file name of all devices must be set if any is set"
)
if not all(name is None for name in persistence_files):
schema = vol.Schema(vol.Unique())
schema(persistence_files)
return value
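# Illustrative behaviour (hypothetical gateway entries, a minimal sketch):
#   [{"device": "a", "persistence_file": "a.json"},
#    {"device": "b", "persistence_file": "b.json"}]   -> accepted
#   [{"device": "a", "persistence_file": "a.json"},
#    {"device": "b"}]                                  -> vol.Invalid, because a
#   persistence file is set for one gateway but not for the other.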
def is_persistence_file(value):
"""Validate that persistence file path ends in either .pickle or .json."""
if value.endswith((".json", ".pickle")):
return value
raise vol.Invalid(f"{value} does not end in either `.json` or `.pickle`")
def deprecated(key):
"""Mark key as deprecated in configuration."""
def validator(config):
"""Check if key is in config, log warning and remove key."""
if key not in config:
return config
_LOGGER.warning(
"%s option for %s is deprecated. Please remove %s from your "
"configuration file",
key,
DOMAIN,
key,
)
config.pop(key)
return config
return validator
NODE_SCHEMA = vol.Schema({cv.positive_int: {vol.Required(CONF_NODE_NAME): cv.string}})
GATEWAY_SCHEMA = {
vol.Required(CONF_DEVICE): cv.string,
vol.Optional(CONF_PERSISTENCE_FILE): vol.All(cv.string, is_persistence_file),
vol.Optional(CONF_BAUD_RATE, default=DEFAULT_BAUD_RATE): cv.positive_int,
vol.Optional(CONF_TCP_PORT, default=DEFAULT_TCP_PORT): cv.port,
vol.Optional(CONF_TOPIC_IN_PREFIX): valid_subscribe_topic,
vol.Optional(CONF_TOPIC_OUT_PREFIX): valid_publish_topic,
vol.Optional(CONF_NODES, default={}): NODE_SCHEMA,
}
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
vol.All(
deprecated(CONF_DEBUG),
{
vol.Required(CONF_GATEWAYS): vol.All(
cv.ensure_list, has_all_unique_files, [GATEWAY_SCHEMA]
),
vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean,
vol.Optional(CONF_PERSISTENCE, default=True): cv.boolean,
vol.Optional(CONF_RETAIN, default=True): cv.boolean,
vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): cv.string,
},
)
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the MySensors component."""
gateways = await setup_gateways(hass, config)
if not gateways:
_LOGGER.error("No devices could be setup as gateways, check your configuration")
return False
hass.data[MYSENSORS_GATEWAYS] = gateways
hass.async_create_task(finish_setup(hass, config, gateways))
return True
def _get_mysensors_name(gateway, node_id, child_id):
"""Return a name for a node child."""
node_name = f"{gateway.sensors[node_id].sketch_name} {node_id}"
node_name = next(
(
node[CONF_NODE_NAME]
for conf_id, node in gateway.nodes_config.items()
if node.get(CONF_NODE_NAME) is not None and conf_id == node_id
),
node_name,
)
return f"{node_name} {child_id}"
@callback
def setup_mysensors_platform(
hass,
domain,
discovery_info,
device_class,
device_args=None,
async_add_entities=None,
):
"""Set up a MySensors platform."""
# Only act if called via MySensors by discovery event.
# Otherwise gateway is not set up.
if not discovery_info:
return None
if device_args is None:
device_args = ()
new_devices = []
new_dev_ids = discovery_info[ATTR_DEVICES]
for dev_id in new_dev_ids:
devices = get_mysensors_devices(hass, domain)
if dev_id in devices:
continue
gateway_id, node_id, child_id, value_type = dev_id
gateway = get_mysensors_gateway(hass, gateway_id)
if not gateway:
continue
device_class_copy = device_class
if isinstance(device_class, dict):
child = gateway.sensors[node_id].children[child_id]
s_type = gateway.const.Presentation(child.type).name
device_class_copy = device_class[s_type]
name = _get_mysensors_name(gateway, node_id, child_id)
args_copy = (*device_args, gateway, node_id, child_id, name, value_type)
devices[dev_id] = device_class_copy(*args_copy)
new_devices.append(devices[dev_id])
if new_devices:
_LOGGER.info("Adding new devices: %s", new_devices)
if async_add_entities is not None:
async_add_entities(new_devices, True)
return new_devices
|
from collections import Counter
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class PyatePhrases(FeatsFromSpacyDoc):
def __init__(self, extractor=None, **args):
import pyate
self._extractor = pyate.combo_basic if extractor is None else extractor
FeatsFromSpacyDoc.__init__(self, **args)
def get_feats(self, doc):
return Counter(self._extractor(str(doc)).to_dict())
|
from collections import deque
from functools import partial
from io import BytesIO
from time import time
from kombu.asynchronous.hub import READ, WRITE, get_event_loop
from kombu.exceptions import HttpError
from kombu.utils.encoding import bytes_to_str
from .base import BaseClient
try:
import pycurl # noqa
except ImportError: # pragma: no cover
pycurl = Curl = METH_TO_CURL = None # noqa
else:
from pycurl import Curl # noqa
METH_TO_CURL = { # noqa
'GET': pycurl.HTTPGET,
'POST': pycurl.POST,
'PUT': pycurl.UPLOAD,
'HEAD': pycurl.NOBODY,
}
__all__ = ('CurlClient',)
DEFAULT_USER_AGENT = 'Mozilla/5.0 (compatible; pycurl)'
EXTRA_METHODS = frozenset(['DELETE', 'OPTIONS', 'PATCH'])
class CurlClient(BaseClient):
"""Curl HTTP Client."""
Curl = Curl
def __init__(self, hub=None, max_clients=10):
if pycurl is None:
raise ImportError('The curl client requires the pycurl library.')
hub = hub or get_event_loop()
super().__init__(hub)
self.max_clients = max_clients
self._multi = pycurl.CurlMulti()
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [self.Curl() for i in range(max_clients)]
self._free_list = self._curls[:]
self._pending = deque()
self._fds = {}
self._socket_action = self._multi.socket_action
self._timeout_check_tref = self.hub.call_repeatedly(
1.0, self._timeout_check,
)
# pycurl 7.29.0 workaround
dummy_curl_handle = pycurl.Curl()
self._multi.add_handle(dummy_curl_handle)
self._multi.remove_handle(dummy_curl_handle)
def close(self):
self._timeout_check_tref.cancel()
for _curl in self._curls:
_curl.close()
self._multi.close()
def add_request(self, request):
self._pending.append(request)
self._process_queue()
self._set_timeout(0)
return request
# the next two methods are used for linux/epoll workaround:
# we temporarily remove all curl fds from hub, so curl cannot
# close a fd which is still inside epoll
def _pop_from_hub(self):
for fd in self._fds:
self.hub.remove(fd)
def _push_to_hub(self):
for fd, events in self._fds.items():
if events & READ:
self.hub.add_reader(fd, self.on_readable, fd)
if events & WRITE:
self.hub.add_writer(fd, self.on_writable, fd)
def _handle_socket(self, event, fd, multi, data, _pycurl=pycurl):
if event == _pycurl.POLL_REMOVE:
if fd in self._fds:
self._fds.pop(fd, None)
else:
if event == _pycurl.POLL_IN:
self._fds[fd] = READ
elif event == _pycurl.POLL_OUT:
self._fds[fd] = WRITE
elif event == _pycurl.POLL_INOUT:
self._fds[fd] = READ | WRITE
def _set_timeout(self, msecs):
pass # TODO
def _timeout_check(self, _pycurl=pycurl):
self._pop_from_hub()
try:
while 1:
try:
ret, _ = self._multi.socket_all()
except pycurl.error as exc:
ret = exc.args[0]
if ret != _pycurl.E_CALL_MULTI_PERFORM:
break
finally:
self._push_to_hub()
self._process_pending_requests()
def on_readable(self, fd, _pycurl=pycurl):
return self._on_event(fd, _pycurl.CSELECT_IN)
def on_writable(self, fd, _pycurl=pycurl):
return self._on_event(fd, _pycurl.CSELECT_OUT)
def _on_event(self, fd, event, _pycurl=pycurl):
self._pop_from_hub()
try:
while 1:
try:
ret, _ = self._socket_action(fd, event)
except pycurl.error as exc:
ret = exc.args[0]
if ret != _pycurl.E_CALL_MULTI_PERFORM:
break
finally:
self._push_to_hub()
self._process_pending_requests()
def _process_pending_requests(self):
while 1:
q, succeeded, failed = self._multi.info_read()
for curl in succeeded:
self._process(curl)
for curl, errno, reason in failed:
self._process(curl, errno, reason)
if q == 0:
break
self._process_queue()
def _process_queue(self):
while 1:
started = 0
while self._free_list and self._pending:
started += 1
curl = self._free_list.pop()
request = self._pending.popleft()
headers = self.Headers()
buf = BytesIO()
curl.info = {
'headers': headers,
'buffer': buf,
'request': request,
'curl_start_time': time(),
}
self._setup_request(curl, request, buf, headers)
self._multi.add_handle(curl)
if not started:
break
def _process(self, curl, errno=None, reason=None, _pycurl=pycurl):
info, curl.info = curl.info, None
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info['buffer']
if errno:
code = 599
error = HttpError(code, reason)
error.errno = errno
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(_pycurl.HTTP_CODE)
effective_url = curl.getinfo(_pycurl.EFFECTIVE_URL)
buffer.seek(0)
# try:
request = info['request']
request.on_ready(self.Response(
request=request, code=code, headers=info['headers'],
buffer=buffer, effective_url=effective_url, error=error,
))
def _setup_request(self, curl, request, buffer, headers, _pycurl=pycurl):
setopt = curl.setopt
setopt(_pycurl.URL, bytes_to_str(request.url))
# see tornado curl client
request.headers.setdefault('Expect', '')
request.headers.setdefault('Pragma', '')
setopt(
_pycurl.HTTPHEADER,
['{}: {}'.format(*h) for h in request.headers.items()],
)
setopt(
_pycurl.HEADERFUNCTION,
partial(request.on_header or self.on_header, request.headers),
)
setopt(
_pycurl.WRITEFUNCTION, request.on_stream or buffer.write,
)
setopt(
_pycurl.FOLLOWLOCATION, request.follow_redirects,
)
setopt(
_pycurl.USERAGENT,
bytes_to_str(request.user_agent or DEFAULT_USER_AGENT),
)
if request.network_interface:
setopt(_pycurl.INTERFACE, request.network_interface)
setopt(
_pycurl.ENCODING, 'gzip,deflate' if request.use_gzip else 'none',
)
if request.proxy_host:
if not request.proxy_port:
raise ValueError('Request with proxy_host but no proxy_port')
setopt(_pycurl.PROXY, request.proxy_host)
setopt(_pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
setopt(_pycurl.PROXYUSERPWD, '{}:{}'.format(
request.proxy_username, request.proxy_password or ''))
else:
setopt(_pycurl.PROXY, '')
curl.unsetopt(_pycurl.PROXYUSERPWD)
setopt(_pycurl.SSL_VERIFYPEER, 1 if request.validate_cert else 0)
setopt(_pycurl.SSL_VERIFYHOST, 2 if request.validate_cert else 0)
if request.ca_certs is not None:
setopt(_pycurl.CAINFO, request.ca_certs)
setopt(_pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
for meth in METH_TO_CURL.values():
setopt(meth, False)
try:
meth = METH_TO_CURL[request.method]
except KeyError:
curl.setopt(_pycurl.CUSTOMREQUEST, request.method)
else:
curl.unsetopt(_pycurl.CUSTOMREQUEST)
setopt(meth, True)
if request.method in ('POST', 'PUT'):
body = request.body.encode('utf-8') if request.body else bytes()
reqbuffer = BytesIO(body)
setopt(_pycurl.READFUNCTION, reqbuffer.read)
if request.method == 'POST':
def ioctl(cmd):
if cmd == _pycurl.IOCMD_RESTARTREAD:
reqbuffer.seek(0)
setopt(_pycurl.IOCTLFUNCTION, ioctl)
setopt(_pycurl.POSTFIELDSIZE, len(body))
else:
setopt(_pycurl.INFILESIZE, len(body))
elif request.method == 'GET':
assert not request.body
if request.auth_username is not None:
auth_mode = {
'basic': _pycurl.HTTPAUTH_BASIC,
'digest': _pycurl.HTTPAUTH_DIGEST
}[request.auth_mode or 'basic']
setopt(_pycurl.HTTPAUTH, auth_mode)
userpwd = '{}:{}'.format(
request.auth_username, request.auth_password or '',
)
setopt(_pycurl.USERPWD, userpwd)
else:
curl.unsetopt(_pycurl.USERPWD)
if request.client_cert is not None:
setopt(_pycurl.SSLCERT, request.client_cert)
if request.client_key is not None:
setopt(_pycurl.SSLKEY, request.client_key)
if request.on_prepare is not None:
request.on_prepare(curl)
|
from unittest import TestCase
import pandas as pd
from scattertext.CorpusFromParsedDocuments import CorpusFromParsedDocuments
from scattertext.WhitespaceNLP import whitespace_nlp
from scattertext.representations.Word2VecFromParsedCorpus import GensimPhraseAdder
from scattertext.test.test_corpusFromPandas import get_docs_categories
class TestGensimPhraseAdder(TestCase):
@classmethod
def setUp(cls):
cls.categories, cls.documents = get_docs_categories()
cls.parsed_docs = []
for doc in cls.documents:
cls.parsed_docs.append(whitespace_nlp(doc))
cls.df = pd.DataFrame({'category': cls.categories,
'author': ['a', 'a', 'c', 'c', 'c',
'c', 'd', 'd', 'e', 'e'],
'parsed': cls.parsed_docs,
'document_lengths': [len(doc) for doc in cls.documents]})
cls.corpus = CorpusFromParsedDocuments(cls.df, 'category', 'parsed').build()
def test_add_phrase(self):
adder = GensimPhraseAdder()
# to do
#res = adder.add_phrases(self.corpus)
# self.fail()
|
import logging
SUPPORTED_SCALING_FACTORS = [(7, 8), (3, 4), (5, 8), (1, 2), (3, 8), (1, 4), (1, 8)]
_LOGGER = logging.getLogger(__name__)
def scale_jpeg_camera_image(cam_image, width, height):
"""Scale a camera image as close as possible to one of the supported scaling factors."""
turbo_jpeg = TurboJPEGSingleton.instance()
if not turbo_jpeg:
return cam_image.content
(current_width, current_height, _, _) = turbo_jpeg.decode_header(cam_image.content)
if current_width <= width or current_height <= height:
return cam_image.content
ratio = width / current_width
scaling_factor = SUPPORTED_SCALING_FACTORS[-1]
for supported_sf in SUPPORTED_SCALING_FACTORS:
if ratio >= (supported_sf[0] / supported_sf[1]):
scaling_factor = supported_sf
break
return turbo_jpeg.scale_with_quality(
cam_image.content,
scaling_factor=scaling_factor,
quality=75,
)
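# Worked example (assumed numbers, a sketch): for a 1920x1080 source and a
# requested size of 640x480, ratio = 640 / 1920, i.e. about 0.33. The first
# factor in SUPPORTED_SCALING_FACTORS that is <= the ratio is (1, 4), so the
# image is scaled to 480x270, the largest supported size that still fits
# within the requested width.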
class TurboJPEGSingleton:
"""
Load TurboJPEG only once.
Ensures we do not log load failures each snapshot
since camera image fetches happen every few
seconds.
"""
__instance = None
@staticmethod
def instance():
"""Singleton for TurboJPEG."""
if TurboJPEGSingleton.__instance is None:
TurboJPEGSingleton()
return TurboJPEGSingleton.__instance
def __init__(self):
"""Try to create TurboJPEG only once."""
try:
# TurboJPEG checks for libturbojpeg
# when its created, but it imports
# numpy which may or may not work so
# we have to guard the import here.
from turbojpeg import TurboJPEG # pylint: disable=import-outside-toplevel
TurboJPEGSingleton.__instance = TurboJPEG()
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"libturbojpeg is not installed, cameras may impact HomeKit performance"
)
TurboJPEGSingleton.__instance = False
|
from gitless import core
from . import helpers, pprint
def parser(subparsers, _):
"""Adds the tag parser to the given subparsers object."""
desc = 'list, create, or delete tags'
tag_parser = subparsers.add_parser(
'tag', help=desc, description=desc.capitalize(), aliases=['tg'])
list_group = tag_parser.add_argument_group('list tags')
list_group.add_argument(
'-r', '--remote',
help='list remote tags in addition to local tags',
action='store_true')
create_group = tag_parser.add_argument_group('create tags')
create_group.add_argument(
'-c', '--create', nargs='+', help='create tag(s)', dest='create_t',
metavar='tag')
create_group.add_argument(
'-ci', '--commit',
help='the commit to tag (only relevant if a new '
'tag is created; defaults to the HEAD commit)', dest='ci')
delete_group = tag_parser.add_argument_group('delete tags')
delete_group.add_argument(
'-d', '--delete', nargs='+', help='delete tag(s)', dest='delete_t',
metavar='tag')
tag_parser.set_defaults(func=main)
def main(args, repo):
is_list = bool(args.remote)
is_create = bool(args.create_t or args.ci)
is_delete = bool(args.delete_t)
if is_list + is_create + is_delete > 1:
pprint.err('Invalid flag combination')
pprint.err_exp('Can only do one of list, create, or delete tags at a time')
return False
ret = True
if args.create_t:
ret = _do_create(args.create_t, args.ci or 'HEAD', repo)
elif args.delete_t:
ret = _do_delete(args.delete_t, repo)
else:
_do_list(repo, args.remote)
return ret
def _do_list(repo, list_remote):
pprint.msg('List of tags:')
pprint.exp('do gl tag -c t to create tag t')
pprint.exp('do gl tag -d t to delete tag t')
pprint.blank()
no_tags = True
for t in (repo.lookup_tag(n) for n in sorted(repo.listall_tags())):
pprint.item('{0} ➜ tags {1}'.format(t, pprint.commit_str(t.commit)))
no_tags = False
if list_remote:
for r in sorted(repo.remotes, key=lambda r: r.name):
for t in (r.lookup_tag(n) for n in sorted(r.listall_tags())):
pprint.item('{0} ➜ tags {1}'.format(t, pprint.commit_str(t.commit)))
no_tags = False
if no_tags:
pprint.item('There are no tags to list')
def _do_create(create_t, dp, repo):
errors_found = False
try:
target = repo.revparse_single(dp)
except KeyError:
raise ValueError('Invalid commit {0}'.format(dp))
for t_name in create_t:
r = repo
remote_str = ''
if '/' in t_name: # might want to create a remote tag
maybe_remote, maybe_remote_tag = t_name.split('/', 1)
if maybe_remote in repo.remotes:
r = repo.remotes[maybe_remote]
t_name = maybe_remote_tag
conf_msg = 'Tag {0} will be created in remote repository {1}'.format(
t_name, maybe_remote)
if not pprint.conf_dialog(conf_msg):
pprint.msg(
'Aborted: creation of tag {0} in remote repository {1}'.format(
t_name, maybe_remote))
continue
remote_str = ' in remote repository {0}'.format(maybe_remote)
try:
r.create_tag(t_name, target)
pprint.ok('Created new tag {0}{1}'.format(t_name, remote_str))
except ValueError as e:
pprint.err(e)
errors_found = True
return not errors_found
def _do_delete(delete_t, repo):
errors_found = False
for t_name in delete_t:
try:
t = helpers.get_tag(t_name, repo)
tag_str = 'Tag {0} will be removed'.format(t.tag_name)
remote_str = ''
if isinstance(t, core.RemoteTag):
remote_str = 'from remote repository {0}'.format(t.remote_name)
if not pprint.conf_dialog('{0} {1}'.format(tag_str, remote_str)):
pprint.msg('Aborted: removal of tag {0}'.format(t))
continue
t.delete()
pprint.ok('Tag {0} removed successfully'.format(t))
except ValueError as e:
pprint.err(e)
errors_found = True
return not errors_found
|
import warnings
from typing import Awaitable, TYPE_CHECKING, Dict
import discord
from .commands import (
bot_has_permissions,
bot_in_a_guild,
has_permissions,
is_owner,
guildowner,
guildowner_or_permissions,
admin,
admin_or_permissions,
mod,
mod_or_permissions,
)
from .utils.mod import (
is_mod_or_superior as _is_mod_or_superior,
is_admin_or_superior as _is_admin_or_superior,
check_permissions as _check_permissions,
)
if TYPE_CHECKING:
from .bot import Red
from .commands import Context
__all__ = [
"bot_has_permissions",
"bot_in_a_guild",
"has_permissions",
"is_owner",
"guildowner",
"guildowner_or_permissions",
"admin",
"admin_or_permissions",
"mod",
"mod_or_permissions",
"is_mod_or_superior",
"is_admin_or_superior",
"check_permissions",
]
def is_mod_or_superior(ctx: "Context") -> Awaitable[bool]:
warnings.warn(
"`redbot.core.checks.is_mod_or_superior` is deprecated and will be removed in a future "
"release, please use `redbot.core.utils.mod.is_mod_or_superior` instead.",
category=DeprecationWarning,
stacklevel=2,
)
return _is_mod_or_superior(ctx.bot, ctx.author)
def is_admin_or_superior(ctx: "Context") -> Awaitable[bool]:
warnings.warn(
"`redbot.core.checks.is_admin_or_superior` is deprecated and will be removed in a future "
"release, please use `redbot.core.utils.mod.is_admin_or_superior` instead.",
category=DeprecationWarning,
stacklevel=2,
)
return _is_admin_or_superior(ctx.bot, ctx.author)
def check_permissions(ctx: "Context", perms: Dict[str, bool]) -> Awaitable[bool]:
warnings.warn(
"`redbot.core.checks.check_permissions` is deprecated and will be removed in a future "
"release, please use `redbot.core.utils.mod.check_permissions`.",
DeprecationWarning,
stacklevel=2,
)
return _check_permissions(ctx, perms)
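# Minimal sketch of the deprecation-shim pattern used above (added example, not
# part of redbot): `stacklevel=2` makes the warning point at the caller of the
# shim rather than at the shim itself. `_old_helper`/`_new_helper` are
# hypothetical names; `warnings` is already imported at the top of this module.
def _new_helper(x):
    return x * 2


def _old_helper(x):
    warnings.warn(
        "`_old_helper` is deprecated, use `_new_helper` instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    return _new_helper(x)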
|
from contextlib import contextmanager
from datetime import datetime
import mock
from freezegun import freeze_time
from paasta_tools.autoscaling import load_boost
TEST_CURRENT_TIME = datetime(2020, 2, 14)
@contextmanager
def patch_zk_client(mock_values=None):
with mock.patch(
"paasta_tools.utils.KazooClient", autospec=True
) as mock_client, mock.patch(
"paasta_tools.utils.load_system_paasta_config", autospec=True
):
def mock_get(key):
if not mock_values or key not in mock_values:
raise load_boost.NoNodeError
return (mock_values[key], None)
mock_client.return_value = mock.Mock(get=mock_get)
yield mock_client()
def test_get_zk_cluster_boost_path():
fake_region = "westeros-1"
fake_pool = "default"
expected_result = "/paasta_cluster_autoscaler/westeros-1/default/boost"
assert (
load_boost.get_zk_cluster_boost_path(fake_region, fake_pool) == expected_result
)
def test_get_boost_values():
fake_region = "westeros-1"
fake_pool = "default"
base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)
fake_end_time = 12345.0
fake_boost_factor = 1.5
fake_expected_load = 80
with patch_zk_client(
{
base_path + "/end_time": str(fake_end_time).encode("utf-8"),
base_path + "/factor": str(fake_boost_factor).encode("utf-8"),
base_path + "/expected_load": str(fake_expected_load).encode("utf-8"),
}
) as mock_zk_client:
assert load_boost.get_boost_values(
zk_boost_path=f"/paasta_cluster_autoscaler/{fake_region}/{fake_pool}/boost",
zk=mock_zk_client,
) == load_boost.BoostValues(
end_time=fake_end_time,
boost_factor=fake_boost_factor,
expected_load=fake_expected_load,
)
def test_get_boost_values_when_no_values_exist():
fake_region = "westeros-1"
fake_pool = "default"
with patch_zk_client() as mock_zk_client:
assert load_boost.get_boost_values(
zk_boost_path=f"/paasta_cluster_autoscaler/{fake_region}/{fake_pool}/boost",
zk=mock_zk_client,
) == load_boost.BoostValues(end_time=0, boost_factor=1.0, expected_load=0)
@freeze_time(TEST_CURRENT_TIME)
def test_set_boost_factor_with_defaults():
fake_region = "westeros-1"
fake_pool = "default"
base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)
with patch_zk_client() as mock_zk_client:
load_boost.set_boost_factor(base_path)
expected_end_time = (
float(TEST_CURRENT_TIME.timestamp()) + 60 * load_boost.DEFAULT_BOOST_DURATION
)
assert mock_zk_client.set.call_args_list == [
mock.call(base_path + "/end_time", str(expected_end_time).encode("utf-8")),
mock.call(
base_path + "/factor", str(load_boost.DEFAULT_BOOST_FACTOR).encode("utf-8")
),
mock.call(base_path + "/expected_load", "0".encode("utf-8")),
]
def test_set_boost_factor():
pass
@freeze_time(TEST_CURRENT_TIME)
def test_set_boost_factor_with_active_boost():
fake_region = "westeros-1"
fake_pool = "default"
base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)
fake_end_time = float(TEST_CURRENT_TIME.timestamp()) + 10
fake_boost_factor = 1.5
fake_expected_load = 80
# patch zk client so that it returns an end time that
# indicates an active boost
with patch_zk_client(
{
base_path + "/end_time": str(fake_end_time).encode("utf-8"),
base_path + "/factor": str(fake_boost_factor).encode("utf-8"),
base_path + "/expected_load": str(fake_expected_load).encode("utf-8"),
}
):
# by default, set boost should not go through if there's an active boost
assert not load_boost.set_boost_factor(zk_boost_path=base_path)
@freeze_time(TEST_CURRENT_TIME)
def test_set_boost_factor_with_active_boost_override():
fake_region = "westeros-1"
fake_pool = "default"
base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)
fake_end_time = float(TEST_CURRENT_TIME.timestamp()) + 10
fake_boost_factor = 1.5
fake_expected_load = 80
mock_boost_values = {
base_path + "/end_time": str(fake_end_time).encode("utf-8"),
base_path + "/factor": str(fake_boost_factor).encode("utf-8"),
base_path + "/expected_load": str(fake_expected_load).encode("utf-8"),
}
# patch zk client so that it returns an end time that
# indicates an active boost
with patch_zk_client(mock_boost_values) as mock_zk_client:
# we need the zk.set to actually override the initial mocked values
def mock_set(key, value):
mock_boost_values[key] = value
mock_zk_client.set = mock_set
# set boost will go through with an active boost if override is toggled on
assert load_boost.set_boost_factor(
zk_boost_path=f"/paasta_cluster_autoscaler/{fake_region}/{fake_pool}/boost",
override=True,
)
@freeze_time(TEST_CURRENT_TIME)
def test_clear_boost():
fake_region = "westeros-1"
fake_pool = "default"
base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)
with patch_zk_client() as mock_zk_client:
load_boost.clear_boost(base_path, region=fake_region, pool=fake_pool)
expected_end_time = float(TEST_CURRENT_TIME.timestamp())
assert mock_zk_client.set.call_args_list == [
mock.call(base_path + "/end_time", str(expected_end_time).encode("utf-8")),
mock.call(base_path + "/factor", "1".encode("utf-8")),
mock.call(base_path + "/expected_load", "0".encode("utf-8")),
]
@mock.patch("paasta_tools.autoscaling.load_boost.clusterman_metrics", autospec=True)
@mock.patch(
"paasta_tools.autoscaling.load_boost.load_system_paasta_config", autospec=True
)
@freeze_time(TEST_CURRENT_TIME)
def test_send_clusterman_metrics(
mock_load_system_paasta_config, mock_clusterman_metrics
):
fake_region = "westeros-1"
fake_pool = "default"
base_path = load_boost.get_zk_cluster_boost_path(fake_region, fake_pool)
mock_load_system_paasta_config.return_value.get_cluster.return_value = (
"westeros-prod"
)
mock_clusterman_metrics.generate_key_with_dimensions = (
lambda key, dims: f"{key}|{dims}"
)
mock_writer = (
mock_clusterman_metrics.ClustermanMetricsBotoClient().get_writer().__enter__()
)
with patch_zk_client():
load_boost.set_boost_factor(
zk_boost_path=base_path,
region=fake_region,
pool=fake_pool,
factor=1.3,
duration_minutes=10,
)
expected_metrics_dimensions = {"cluster": "westeros-prod", "pool": "default"}
expected_metrics_key = f"boost_factor|{expected_metrics_dimensions}"
assert mock_writer.send.call_args_list == [
mock.call((expected_metrics_key, TEST_CURRENT_TIME.timestamp(), 1.3)),
mock.call((expected_metrics_key, TEST_CURRENT_TIME.timestamp() + 10 * 60, 1.0)),
]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl.testing import absltest
# This stanza exercises setting $TEST_RANDOMIZE_ORDERING_SEED *after* importing
# the absltest library.
if os.environ.get('LATE_SET_TEST_RANDOMIZE_ORDERING_SEED', ''):
os.environ['TEST_RANDOMIZE_ORDERING_SEED'] = os.environ[
'LATE_SET_TEST_RANDOMIZE_ORDERING_SEED']
class ClassA(absltest.TestCase):
def test_a(self):
sys.stderr.write('\nclass A test A\n')
def test_b(self):
sys.stderr.write('\nclass A test B\n')
def test_c(self):
sys.stderr.write('\nclass A test C\n')
if __name__ == '__main__':
absltest.main()
|
from flexx import app, ui
class Red(ui.Widget):
CSS = '.flx-Red { background: #ff0000;}'
class Deep1(ui.Widget):
# This was broken on Chrome earlier
def init(self):
with ui.VBox():
ui.Label(text='Widget in a vbox in a widget in a vbox')
with ui.VBox(flex=1):
with ui.Widget(flex=1):
with ui.VBox():
ui.Label(text='---')
Red(flex=1)
class Deep2(ui.Widget):
def init(self):
with ui.VBox():
ui.Label(text='Widget in a vbox in a vbox in a vbox')
with ui.VBox(flex=1):
with ui.VBox(flex=1):
ui.Label(text='---')
Red(flex=1)
class Deep3(ui.Widget):
def init(self):
with ui.VBox():
ui.Label(text='Widget in a vbox in a hbox in a vbox')
with ui.HBox(flex=1):
ui.Label(text='|||')
with ui.VBox(flex=1):
ui.Label(text='---')
Red(flex=1)
class Deep4(ui.Widget):
def init(self):
with ui.HBox():
ui.Label(text='Widget in a hbox in a widget in a hbox')
with ui.HBox(flex=1):
with ui.Widget(flex=1):
with ui.HBox():
ui.Label(text='|||')
Red(flex=1)
class Deep5(ui.Widget):
def init(self):
with ui.HBox():
ui.Label(text='Widget in a hbox in a hbox in a hbox')
with ui.HBox(flex=1):
with ui.HBox(flex=1):
ui.Label(text='|||')
Red(flex=1)
class Deep6(ui.Widget):
def init(self):
with ui.HBox():
ui.Label(text='Widget in a hbox in a vbox in a hbox')
with ui.VBox(flex=1):
ui.Label(text='---')
with ui.HBox(flex=1):
ui.Label(text='|||')
Red(flex=1)
class Deep(ui.Widget):
def init(self):
with ui.HFix():
with ui.HFix():
Deep1()
Deep2()
Deep3()
with ui.VFix():
Deep4()
Deep5()
Deep6()
if __name__ == '__main__':
app.launch(Deep, 'app')
app.run()
|
from ...common.interfaces import AbstractPlugin
class Plugin(AbstractPlugin):
SECTION = 'rcassert'
def __init__(self, core, cfg, name):
AbstractPlugin.__init__(self, core, cfg, name)
self.ok_codes = []
self.fail_code = 10
@staticmethod
def get_key():
return __file__
def get_available_options(self):
return ["pass", "fail_code"]
def configure(self):
codes = self.get_option("pass", '').split(' ')
for code in codes:
if code:
self.ok_codes.append(int(code))
self.fail_code = int(self.get_option("fail_code", self.fail_code))
def post_process(self, retcode):
if not self.ok_codes:
return retcode
for code in self.ok_codes:
self.log.debug("Comparing %s with %s codes", code, retcode)
if code == int(retcode):
self.log.info(
"Exit code %s was changed to 0 by RCAssert plugin", code)
return 0
self.log.info(
"Changing exit code to %s because RCAssert pass list was unsatisfied",
self.fail_code)
return self.fail_code
|
import pandas as pd
import pytz
from qstrader.system.rebalance.rebalance import Rebalance
class EndOfMonthRebalance(Rebalance):
"""
    Generates a list of rebalance timestamps for pre- or post-market,
    for the final business day of the month (pandas 'BM' frequency)
    between the starting and ending dates provided.
All timestamps produced are set to UTC.
Parameters
----------
start_dt : `pd.Timestamp`
The starting datetime of the rebalance range.
end_dt : `pd.Timestamp`
The ending datetime of the rebalance range.
pre_market : `Boolean`, optional
        Whether to carry out the rebalance at market open/close on
        the final day of the month. Defaults to False, i.e. at
        market close.
"""
def __init__(
self,
start_dt,
end_dt,
pre_market=False
):
self.start_dt = start_dt
self.end_dt = end_dt
self.market_time = self._set_market_time(pre_market)
self.rebalances = self._generate_rebalances()
def _set_market_time(self, pre_market):
"""
Determines whether to use market open or market close
as the rebalance time.
Parameters
----------
pre_market : `Boolean`
Whether the rebalance is carried out at market open/close.
Returns
-------
`str`
The time string used for Pandas timestamp construction.
"""
return "14:30:00" if pre_market else "21:00:00"
def _generate_rebalances(self):
"""
Utilise the Pandas date_range method to create the appropriate
list of rebalance timestamps.
Returns
-------
`List[pd.Timestamp]`
The list of rebalance timestamps.
"""
rebalance_dates = pd.date_range(
start=self.start_dt,
end=self.end_dt,
freq='BM'
)
rebalance_times = [
pd.Timestamp(
"%s %s" % (date, self.market_time), tz=pytz.utc
)
for date in rebalance_dates
]
return rebalance_times
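# Usage sketch (added example, not part of the original module): build the
# rebalancer over an arbitrary three-month range and inspect the generated
# UTC timestamps.
if __name__ == '__main__':
    rebalance = EndOfMonthRebalance(
        start_dt=pd.Timestamp('2020-01-01'),
        end_dt=pd.Timestamp('2020-03-31'),
        pre_market=False
    )
    for ts in rebalance.rebalances:
        print(ts)  # e.g. the last business day of each month at market close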
|
import posixpath
from absl import flags
from perfkitbenchmarker.linux_packages import cuda_toolkit
from perfkitbenchmarker.linux_packages import nvidia_driver
FLAGS = flags.FLAGS
flags.DEFINE_string('tf_cpu_pip_package',
'https://anaconda.org/intel/tensorflow/1.12.0/download/'
'tensorflow-1.12.0-cp27-cp27mu-linux_x86_64.whl',
'TensorFlow CPU pip package to install. By default, PKB '
'will install an Intel-optimized CPU build when using '
'CPUs.')
flags.DEFINE_string('tf_gpu_pip_package', 'tensorflow-gpu==1.12.0',
'TensorFlow GPU pip package to install. By default, PKB '
'will install tensorflow-gpu==1.12 when using GPUs.')
flags.DEFINE_string(
't2t_pip_package', 'tensor2tensor==1.7',
'Tensor2Tensor pip package to install. By default, PKB '
'will install tensor2tensor==1.7 .')
flags.DEFINE_string('tf_cnn_benchmarks_branch',
'cnn_tf_v1.12_compatible',
                    'TensorFlow CNN benchmarks branch that is compatible with '
                    'a TensorFlow version.')
NCCL_URL = 'https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb'
NCCL_PACKAGE = 'nvidia-machine-learning-repo-ubuntu1604_1.0.0-1_amd64.deb'
def GetEnvironmentVars(vm):
"""Return a string containing TensorFlow-related environment variables.
Args:
    vm: the VM to get environment variables for
Returns:
string of environment variables
"""
env_vars = []
if nvidia_driver.CheckNvidiaGpuExists(vm):
output, _ = vm.RemoteCommand('getconf LONG_BIT', should_log=True)
long_bit = output.strip()
lib_name = 'lib' if long_bit == '32' else 'lib64'
env_vars.extend([
'PATH=%s${PATH:+:${PATH}}' %
posixpath.join(cuda_toolkit.CUDA_HOME, 'bin'),
'CUDA_HOME=%s' % cuda_toolkit.CUDA_HOME,
'LD_LIBRARY_PATH=%s${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}' %
posixpath.join(cuda_toolkit.CUDA_HOME, lib_name)])
if FLAGS.aws_s3_region:
env_vars.append('AWS_REGION={}'.format(FLAGS.aws_s3_region))
return ' '.join(env_vars)
def GetTensorFlowVersion(vm):
"""Returns the version of tensorflow installed on the vm.
Args:
vm: the target vm on which to check the tensorflow version
Returns:
installed python tensorflow version as a string
"""
stdout, _ = vm.RemoteCommand(
('echo -e "import tensorflow\nprint(tensorflow.__version__)" | {0} python'
.format(GetEnvironmentVars(vm)))
)
return stdout.strip()
def Install(vm):
"""Installs TensorFlow on the VM."""
has_gpu = nvidia_driver.CheckNvidiaGpuExists(vm)
tf_pip_package = (FLAGS.tf_gpu_pip_package if has_gpu else
FLAGS.tf_cpu_pip_package)
if has_gpu:
vm.Install('cuda_toolkit')
vm.Install('nccl')
vm.Install('cudnn')
vm.Install('pip')
vm.RemoteCommand('sudo pip install requests')
vm.RemoteCommand('sudo pip install --upgrade absl-py')
vm.RemoteCommand('sudo pip install --upgrade %s' % tf_pip_package,
should_log=True)
vm.RemoteCommand(
'sudo pip install --upgrade %s' % FLAGS.t2t_pip_package, should_log=True)
vm.InstallPackages('git')
_, _, retcode = vm.RemoteHostCommandWithReturnCode(
'test -d benchmarks', ignore_failure=True, suppress_warning=True)
if retcode != 0:
vm.RemoteCommand(
'git clone https://github.com/tensorflow/benchmarks.git',
should_log=True)
vm.RemoteCommand(
'cd benchmarks && git checkout {}'.format(FLAGS.tf_cnn_benchmarks_branch)
)
if FLAGS.cloud == 'AWS' and FLAGS.tf_data_dir and (
not FLAGS.tf_use_local_data):
vm.Install('aws_credentials')
def Uninstall(vm):
"""Uninstalls TensorFlow on the VM."""
vm.RemoteCommand('sudo pip uninstall tensorflow',
should_log=True)
|
from flexx import flx
class Split(flx.Widget):
def init(self):
with flx.HSplit():
flx.Widget(style='background:#f00')
with flx.VSplit():
flx.Widget(style='background:#0f0')
with flx.HSplit():
flx.Widget(style='background:#ff0')
with flx.VSplit():
flx.Widget(style='background:#f0f')
with flx.HSplit():
flx.Widget(style='background:#0ff')
flx.Widget(style='background:#00f')
if __name__ == '__main__':
m = flx.launch(Split)
flx.run()
|
from datetime import timedelta
import logging
import voluptuous as vol
from xboxapi import Client
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_API_KEY, CONF_SCAN_INTERVAL
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
CONF_XUID = "xuid"
ICON = "mdi:microsoft-xbox"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_XUID): vol.All(cv.ensure_list, [cv.string]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Xbox platform."""
api = Client(api_key=config[CONF_API_KEY])
entities = []
# request profile info to check api connection
response = api.api_get("profile")
if not response.ok:
_LOGGER.error(
"Can't setup X API connection. Check your account or "
"api key on xapi.us. Code: %s Description: %s ",
response.status_code,
response.reason,
)
return
users = config[CONF_XUID]
interval = timedelta(minutes=1 * len(users))
interval = config.get(CONF_SCAN_INTERVAL, interval)
for xuid in users:
gamercard = get_user_gamercard(api, xuid)
if gamercard is None:
continue
entities.append(XboxSensor(api, xuid, gamercard, interval))
if entities:
add_entities(entities, True)
def get_user_gamercard(api, xuid):
"""Get profile info."""
gamercard = api.gamer(gamertag="", xuid=xuid).get("gamercard")
_LOGGER.debug("User gamercard: %s", gamercard)
if gamercard.get("success", True) and gamercard.get("code") is None:
return gamercard
_LOGGER.error(
"Can't get user profile %s. Error Code: %s Description: %s",
xuid,
gamercard.get("code", "unknown"),
gamercard.get("description", "unknown"),
)
return None
class XboxSensor(Entity):
"""A class for the Xbox account."""
def __init__(self, api, xuid, gamercard, interval):
"""Initialize the sensor."""
self._state = None
self._presence = []
self._xuid = xuid
self._api = api
self._gamertag = gamercard["gamertag"]
self._gamerscore = gamercard["gamerscore"]
self._interval = interval
self._picture = gamercard["gamerpicSmallSslImagePath"]
self._tier = gamercard["tier"]
@property
def name(self):
"""Return the name of the sensor."""
return self._gamertag
@property
def should_poll(self):
"""Return False as this entity has custom polling."""
return False
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {"gamerscore": self._gamerscore, "tier": self._tier}
for device in self._presence:
for title in device["titles"]:
attributes[f'{device["type"]} {title["placement"]}'] = title["name"]
return attributes
@property
def entity_picture(self):
"""Avatar of the account."""
return self._picture
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
async def async_added_to_hass(self):
"""Start custom polling."""
@callback
def async_update(event_time=None):
"""Update the entity."""
self.async_schedule_update_ha_state(True)
async_track_time_interval(self.hass, async_update, self._interval)
def update(self):
"""Update state data from Xbox API."""
presence = self._api.gamer(gamertag="", xuid=self._xuid).get("presence")
_LOGGER.debug("User presence: %s", presence)
self._state = presence["state"]
self._presence = presence.get("devices", [])
|
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.helpers import intent
async def test_add_item(hass, sl_setup):
"""Test adding an item intent."""
response = await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
assert response.speech["plain"]["speech"] == "I've added beer to your shopping list"
async def test_recent_items_intent(hass, sl_setup):
"""Test recent items."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "soda"}}
)
response = await intent.async_handle(hass, "test", "HassShoppingListLastItems")
assert (
response.speech["plain"]["speech"]
== "These are the top 3 items on your shopping list: soda, wine, beer"
)
async def test_deprecated_api_get_all(hass, hass_client, sl_setup):
"""Test the API."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
)
client = await hass_client()
resp = await client.get("/api/shopping_list")
assert resp.status == 200
data = await resp.json()
assert len(data) == 2
assert data[0]["name"] == "beer"
assert not data[0]["complete"]
assert data[1]["name"] == "wine"
assert not data[1]["complete"]
async def test_ws_get_items(hass, hass_ws_client, sl_setup):
"""Test get shopping_list items websocket command."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
)
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "shopping_list/items"})
msg = await client.receive_json()
assert msg["success"] is True
assert msg["id"] == 5
assert msg["type"] == TYPE_RESULT
assert msg["success"]
data = msg["result"]
assert len(data) == 2
assert data[0]["name"] == "beer"
assert not data[0]["complete"]
assert data[1]["name"] == "wine"
assert not data[1]["complete"]
async def test_deprecated_api_update(hass, hass_client, sl_setup):
"""Test the API."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
)
beer_id = hass.data["shopping_list"].items[0]["id"]
wine_id = hass.data["shopping_list"].items[1]["id"]
client = await hass_client()
resp = await client.post(
f"/api/shopping_list/item/{beer_id}", json={"name": "soda"}
)
assert resp.status == 200
data = await resp.json()
assert data == {"id": beer_id, "name": "soda", "complete": False}
resp = await client.post(
f"/api/shopping_list/item/{wine_id}", json={"complete": True}
)
assert resp.status == 200
data = await resp.json()
assert data == {"id": wine_id, "name": "wine", "complete": True}
beer, wine = hass.data["shopping_list"].items
assert beer == {"id": beer_id, "name": "soda", "complete": False}
assert wine == {"id": wine_id, "name": "wine", "complete": True}
async def test_ws_update_item(hass, hass_ws_client, sl_setup):
"""Test update shopping_list item websocket command."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
)
beer_id = hass.data["shopping_list"].items[0]["id"]
wine_id = hass.data["shopping_list"].items[1]["id"]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 5,
"type": "shopping_list/items/update",
"item_id": beer_id,
"name": "soda",
}
)
msg = await client.receive_json()
assert msg["success"] is True
data = msg["result"]
assert data == {"id": beer_id, "name": "soda", "complete": False}
await client.send_json(
{
"id": 6,
"type": "shopping_list/items/update",
"item_id": wine_id,
"complete": True,
}
)
msg = await client.receive_json()
assert msg["success"] is True
data = msg["result"]
assert data == {"id": wine_id, "name": "wine", "complete": True}
beer, wine = hass.data["shopping_list"].items
assert beer == {"id": beer_id, "name": "soda", "complete": False}
assert wine == {"id": wine_id, "name": "wine", "complete": True}
async def test_api_update_fails(hass, hass_client, sl_setup):
"""Test the API."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
client = await hass_client()
resp = await client.post("/api/shopping_list/non_existing", json={"name": "soda"})
assert resp.status == HTTP_NOT_FOUND
beer_id = hass.data["shopping_list"].items[0]["id"]
resp = await client.post(f"/api/shopping_list/item/{beer_id}", json={"name": 123})
assert resp.status == 400
async def test_ws_update_item_fail(hass, hass_ws_client, sl_setup):
"""Test failure of update shopping_list item websocket command."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 5,
"type": "shopping_list/items/update",
"item_id": "non_existing",
"name": "soda",
}
)
msg = await client.receive_json()
assert msg["success"] is False
data = msg["error"]
assert data == {"code": "item_not_found", "message": "Item not found"}
await client.send_json({"id": 6, "type": "shopping_list/items/update", "name": 123})
msg = await client.receive_json()
assert msg["success"] is False
async def test_deprecated_api_clear_completed(hass, hass_client, sl_setup):
"""Test the API."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
)
beer_id = hass.data["shopping_list"].items[0]["id"]
wine_id = hass.data["shopping_list"].items[1]["id"]
client = await hass_client()
# Mark beer as completed
resp = await client.post(
f"/api/shopping_list/item/{beer_id}", json={"complete": True}
)
assert resp.status == 200
resp = await client.post("/api/shopping_list/clear_completed")
assert resp.status == 200
items = hass.data["shopping_list"].items
assert len(items) == 1
assert items[0] == {"id": wine_id, "name": "wine", "complete": False}
async def test_ws_clear_items(hass, hass_ws_client, sl_setup):
"""Test clearing shopping_list items websocket command."""
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "beer"}}
)
await intent.async_handle(
hass, "test", "HassShoppingListAddItem", {"item": {"value": "wine"}}
)
beer_id = hass.data["shopping_list"].items[0]["id"]
wine_id = hass.data["shopping_list"].items[1]["id"]
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 5,
"type": "shopping_list/items/update",
"item_id": beer_id,
"complete": True,
}
)
msg = await client.receive_json()
assert msg["success"] is True
await client.send_json({"id": 6, "type": "shopping_list/items/clear"})
msg = await client.receive_json()
assert msg["success"] is True
items = hass.data["shopping_list"].items
assert len(items) == 1
assert items[0] == {"id": wine_id, "name": "wine", "complete": False}
async def test_deprecated_api_create(hass, hass_client, sl_setup):
"""Test the API."""
client = await hass_client()
resp = await client.post("/api/shopping_list/item", json={"name": "soda"})
assert resp.status == 200
data = await resp.json()
assert data["name"] == "soda"
assert data["complete"] is False
items = hass.data["shopping_list"].items
assert len(items) == 1
assert items[0]["name"] == "soda"
assert items[0]["complete"] is False
async def test_deprecated_api_create_fail(hass, hass_client, sl_setup):
"""Test the API."""
client = await hass_client()
resp = await client.post("/api/shopping_list/item", json={"name": 1234})
assert resp.status == 400
assert len(hass.data["shopping_list"].items) == 0
async def test_ws_add_item(hass, hass_ws_client, sl_setup):
"""Test adding shopping_list item websocket command."""
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "shopping_list/items/add", "name": "soda"})
msg = await client.receive_json()
assert msg["success"] is True
data = msg["result"]
assert data["name"] == "soda"
assert data["complete"] is False
items = hass.data["shopping_list"].items
assert len(items) == 1
assert items[0]["name"] == "soda"
assert items[0]["complete"] is False
async def test_ws_add_item_fail(hass, hass_ws_client, sl_setup):
"""Test adding shopping_list item failure websocket command."""
client = await hass_ws_client(hass)
await client.send_json({"id": 5, "type": "shopping_list/items/add", "name": 123})
msg = await client.receive_json()
assert msg["success"] is False
assert len(hass.data["shopping_list"].items) == 0
|
import quantities as pq
from pylatex.quantities import _dimensionality_to_siunitx, Quantity
def test_quantity():
v = 1 * pq.m/pq.s
q1 = Quantity(v)
assert q1.dumps() == r'\SI{1.0}{\meter\per\second}'
q2 = Quantity(v, format_cb=lambda x: str(int(x)))
assert q2.dumps() == r'\SI{1}{\meter\per\second}'
q3 = Quantity(v, options={'zero-decimal-to-integer': 'true'})
ref = r'\SI[zero-decimal-to-integer=true]{1.0}{\meter\per\second}'
assert q3.dumps() == ref
def test_quantity_float():
q1 = Quantity(42.0)
assert q1.dumps() == r'\num{42.0}'
def test_quantity_uncertain():
t = pq.UncertainQuantity(7., pq.second, 1.)
q1 = Quantity(t)
assert q1.dumps() == r'\SI{7.0 +- 1.0}{\second}'
def test_dimensionality_to_siunitx():
assert _dimensionality_to_siunitx((pq.volt/pq.kelvin).dimensionality) == \
r'\volt\per\Kelvin'
if __name__ == '__main__':
test_quantity()
test_quantity_uncertain()
test_dimensionality_to_siunitx()
|
import diamond.collector
import os
class NfsdCollector(diamond.collector.Collector):
PROC = '/proc/net/rpc/nfsd'
def get_default_config_help(self):
config_help = super(NfsdCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NfsdCollector, self).get_default_config()
config.update({
'path': 'nfsd'
})
return config
def collect(self):
"""
Collect stats
"""
if os.access(self.PROC, os.R_OK):
results = {}
# Open file
file = open(self.PROC)
for line in file:
line = line.split()
if line[0] == 'rc':
results['reply_cache.hits'] = line[1]
results['reply_cache.misses'] = line[2]
results['reply_cache.nocache'] = line[3]
elif line[0] == 'fh':
results['filehandle.stale'] = line[1]
results['filehandle.total-lookups'] = line[2]
results['filehandle.anonlookups'] = line[3]
results['filehandle.dir-not-in-cache'] = line[4]
results['filehandle.nodir-not-in-cache'] = line[5]
elif line[0] == 'io':
results['input_output.bytes-read'] = line[1]
results['input_output.bytes-written'] = line[2]
elif line[0] == 'th':
results['threads.threads'] = line[1]
results['threads.fullcnt'] = line[2]
results['threads.10-20-pct'] = line[3]
results['threads.20-30-pct'] = line[4]
results['threads.30-40-pct'] = line[5]
results['threads.40-50-pct'] = line[6]
results['threads.50-60-pct'] = line[7]
results['threads.60-70-pct'] = line[8]
results['threads.70-80-pct'] = line[9]
results['threads.80-90-pct'] = line[10]
results['threads.90-100-pct'] = line[11]
results['threads.100-pct'] = line[12]
elif line[0] == 'ra':
results['read-ahead.cache-size'] = line[1]
results['read-ahead.10-pct'] = line[2]
results['read-ahead.20-pct'] = line[3]
results['read-ahead.30-pct'] = line[4]
results['read-ahead.40-pct'] = line[5]
results['read-ahead.50-pct'] = line[6]
results['read-ahead.60-pct'] = line[7]
results['read-ahead.70-pct'] = line[8]
results['read-ahead.80-pct'] = line[9]
results['read-ahead.90-pct'] = line[10]
results['read-ahead.100-pct'] = line[11]
results['read-ahead.not-found'] = line[12]
elif line[0] == 'net':
results['net.cnt'] = line[1]
results['net.udpcnt'] = line[2]
results['net.tcpcnt'] = line[3]
results['net.tcpconn'] = line[4]
elif line[0] == 'rpc':
results['rpc.cnt'] = line[1]
results['rpc.badfmt'] = line[2]
results['rpc.badauth'] = line[3]
results['rpc.badclnt'] = line[4]
elif line[0] == 'proc2':
results['v2.unknown'] = line[1]
results['v2.null'] = line[2]
results['v2.getattr'] = line[3]
results['v2.setattr'] = line[4]
results['v2.root'] = line[5]
results['v2.lookup'] = line[6]
results['v2.readlink'] = line[7]
results['v2.read'] = line[8]
results['v2.wrcache'] = line[9]
results['v2.write'] = line[10]
results['v2.create'] = line[11]
results['v2.remove'] = line[12]
results['v2.rename'] = line[13]
results['v2.link'] = line[14]
results['v2.symlink'] = line[15]
results['v2.mkdir'] = line[16]
results['v2.rmdir'] = line[17]
results['v2.readdir'] = line[18]
results['v2.fsstat'] = line[19]
elif line[0] == 'proc3':
results['v3.unknown'] = line[1]
results['v3.null'] = line[2]
results['v3.getattr'] = line[3]
results['v3.setattr'] = line[4]
results['v3.lookup'] = line[5]
results['v3.access'] = line[6]
results['v3.readlink'] = line[7]
results['v3.read'] = line[8]
results['v3.write'] = line[9]
results['v3.create'] = line[10]
results['v3.mkdir'] = line[11]
results['v3.symlink'] = line[12]
results['v3.mknod'] = line[13]
results['v3.remove'] = line[14]
results['v3.rmdir'] = line[15]
results['v3.rename'] = line[16]
results['v3.link'] = line[17]
results['v3.readdir'] = line[18]
results['v3.readdirplus'] = line[19]
results['v3.fsstat'] = line[20]
results['v3.fsinfo'] = line[21]
results['v3.pathconf'] = line[22]
results['v3.commit'] = line[23]
elif line[0] == 'proc4':
results['v4.unknown'] = line[1]
results['v4.null'] = line[2]
results['v4.compound'] = line[3]
elif line[0] == 'proc4ops':
results['v4.ops.unknown'] = line[1]
results['v4.ops.op0-unused'] = line[2]
results['v4.ops.op1-unused'] = line[3]
results['v4.ops.op2-future'] = line[4]
results['v4.ops.access'] = line[5]
results['v4.ops.close'] = line[6]
results['v4.ops.commit'] = line[7]
results['v4.ops.create'] = line[8]
results['v4.ops.delegpurge'] = line[9]
results['v4.ops.delegreturn'] = line[10]
results['v4.ops.getattr'] = line[11]
results['v4.ops.getfh'] = line[12]
results['v4.ops.link'] = line[13]
results['v4.ops.lock'] = line[14]
results['v4.ops.lockt'] = line[15]
results['v4.ops.locku'] = line[16]
results['v4.ops.lookup'] = line[17]
results['v4.ops.lookup_root'] = line[18]
results['v4.ops.nverify'] = line[19]
results['v4.ops.open'] = line[20]
results['v4.ops.openattr'] = line[21]
results['v4.ops.open_conf'] = line[22]
results['v4.ops.open_dgrd'] = line[23]
results['v4.ops.putfh'] = line[24]
results['v4.ops.putpubfh'] = line[25]
results['v4.ops.putrootfh'] = line[26]
results['v4.ops.read'] = line[27]
results['v4.ops.readdir'] = line[28]
results['v4.ops.readlink'] = line[29]
results['v4.ops.remove'] = line[30]
results['v4.ops.rename'] = line[31]
results['v4.ops.renew'] = line[32]
results['v4.ops.restorefh'] = line[33]
results['v4.ops.savefh'] = line[34]
results['v4.ops.secinfo'] = line[35]
results['v4.ops.setattr'] = line[36]
results['v4.ops.setcltid'] = line[37]
results['v4.ops.setcltidconf'] = line[38]
results['v4.ops.verify'] = line[39]
results['v4.ops.write'] = line[40]
results['v4.ops.rellockowner'] = line[41]
# Close File
file.close()
for stat in results.keys():
metric_name = '.' + stat
metric_value = long(float(results[stat]))
metric_value = self.derivative(metric_name, metric_value)
self.publish(metric_name, metric_value, precision=3)
return True
return False
|
import uuid
import socket
from collections import defaultdict
from contextlib import contextmanager
from queue import Empty
from time import monotonic
from kombu.exceptions import ChannelError
from kombu.log import get_logger
from kombu.utils.json import loads, dumps
from kombu.utils.objects import cached_property
from . import virtual
try:
import consul
except ImportError:
consul = None
logger = get_logger('kombu.transport.consul')
DEFAULT_PORT = 8500
DEFAULT_HOST = 'localhost'
class LockError(Exception):
"""An error occurred while trying to acquire the lock."""
class Channel(virtual.Channel):
"""Consul Channel class which talks to the Consul Key/Value store."""
prefix = 'kombu'
index = None
timeout = '10s'
session_ttl = 30
def __init__(self, *args, **kwargs):
if consul is None:
raise ImportError('Missing python-consul library')
super().__init__(*args, **kwargs)
port = self.connection.client.port or self.connection.default_port
host = self.connection.client.hostname or DEFAULT_HOST
logger.debug('Host: %s Port: %s Timeout: %s', host, port, self.timeout)
self.queues = defaultdict(dict)
self.client = consul.Consul(host=host, port=int(port))
def _lock_key(self, queue):
return f'{self.prefix}/{queue}.lock'
def _key_prefix(self, queue):
return f'{self.prefix}/{queue}'
def _get_or_create_session(self, queue):
"""Get or create consul session.
Try to renew the session if it exists, otherwise create a new
session in Consul.
This session is used to acquire a lock inside Consul so that we achieve
read-consistency between the nodes.
Arguments:
queue (str): The name of the Queue.
Returns:
str: The ID of the session.
"""
try:
session_id = self.queues[queue]['session_id']
except KeyError:
session_id = None
return (self._renew_existing_session(session_id)
if session_id is not None else self._create_new_session())
def _renew_existing_session(self, session_id):
logger.debug('Trying to renew existing session %s', session_id)
session = self.client.session.renew(session_id=session_id)
return session.get('ID')
def _create_new_session(self):
logger.debug('Creating session %s with TTL %s',
self.lock_name, self.session_ttl)
session_id = self.client.session.create(
name=self.lock_name, ttl=self.session_ttl)
logger.debug('Created session %s with id %s',
self.lock_name, session_id)
return session_id
@contextmanager
def _queue_lock(self, queue, raising=LockError):
"""Try to acquire a lock on the Queue.
        It does so by creating an object called 'lock' which is locked by the
        current session.
This way other nodes are not able to write to the lock object which
means that they have to wait before the lock is released.
Arguments:
queue (str): The name of the Queue.
raising (Exception): Set custom lock error class.
Raises:
LockError: if the lock cannot be acquired.
Returns:
bool: success?
"""
self._acquire_lock(queue, raising=raising)
try:
yield
finally:
self._release_lock(queue)
def _acquire_lock(self, queue, raising=LockError):
session_id = self._get_or_create_session(queue)
lock_key = self._lock_key(queue)
logger.debug('Trying to create lock object %s with session %s',
lock_key, session_id)
if self.client.kv.put(key=lock_key,
acquire=session_id,
value=self.lock_name):
self.queues[queue]['session_id'] = session_id
return
logger.info('Could not acquire lock on key %s', lock_key)
raise raising()
def _release_lock(self, queue):
"""Try to release a lock.
It does so by simply removing the lock key in Consul.
Arguments:
queue (str): The name of the queue we want to release
the lock from.
"""
logger.debug('Removing lock key %s', self._lock_key(queue))
self.client.kv.delete(key=self._lock_key(queue))
def _destroy_session(self, queue):
"""Destroy a previously created Consul session.
Will release all locks it still might hold.
Arguments:
queue (str): The name of the Queue.
"""
logger.debug('Destroying session %s', self.queues[queue]['session_id'])
self.client.session.destroy(self.queues[queue]['session_id'])
def _new_queue(self, queue, **_):
self.queues[queue] = {'session_id': None}
return self.client.kv.put(key=self._key_prefix(queue), value=None)
def _delete(self, queue, *args, **_):
self._destroy_session(queue)
self.queues.pop(queue, None)
self._purge(queue)
def _put(self, queue, payload, **_):
"""Put `message` onto `queue`.
This simply writes a key to the K/V store of Consul
"""
key = '{}/msg/{}_{}'.format(
self._key_prefix(queue),
int(round(monotonic() * 1000)),
uuid.uuid4(),
)
if not self.client.kv.put(key=key, value=dumps(payload), cas=0):
raise ChannelError(f'Cannot add key {key!r} to consul')
def _get(self, queue, timeout=None):
"""Get the first available message from the queue.
Before it does so it acquires a lock on the Key/Value store so
only one node reads at the same time. This is for read consistency
"""
with self._queue_lock(queue, raising=Empty):
key = '{}/msg/'.format(self._key_prefix(queue))
logger.debug('Fetching key %s with index %s', key, self.index)
self.index, data = self.client.kv.get(
key=key, recurse=True,
index=self.index, wait=self.timeout,
)
try:
if data is None:
raise Empty()
logger.debug('Removing key %s with modifyindex %s',
data[0]['Key'], data[0]['ModifyIndex'])
self.client.kv.delete(key=data[0]['Key'],
cas=data[0]['ModifyIndex'])
return loads(data[0]['Value'])
except TypeError:
pass
raise Empty()
def _purge(self, queue):
self._destroy_session(queue)
return self.client.kv.delete(
key='{}/msg/'.format(self._key_prefix(queue)),
recurse=True,
)
def _size(self, queue):
size = 0
try:
key = '{}/msg/'.format(self._key_prefix(queue))
logger.debug('Fetching key recursively %s with index %s',
key, self.index)
self.index, data = self.client.kv.get(
key=key, recurse=True,
index=self.index, wait=self.timeout,
)
size = len(data)
except TypeError:
pass
logger.debug('Found %s keys under %s with index %s',
size, key, self.index)
return size
@cached_property
def lock_name(self):
return f'{socket.gethostname()}'
class Transport(virtual.Transport):
"""Consul K/V storage Transport for Kombu."""
Channel = Channel
default_port = DEFAULT_PORT
driver_type = 'consul'
driver_name = 'consul'
def __init__(self, *args, **kwargs):
if consul is None:
raise ImportError('Missing python-consul library')
super().__init__(*args, **kwargs)
self.connection_errors = (
virtual.Transport.connection_errors + (
consul.ConsulException, consul.base.ConsulException
)
)
self.channel_errors = (
virtual.Transport.channel_errors + (
consul.ConsulException, consul.base.ConsulException
)
)
def verify_connection(self, connection):
port = connection.client.port or self.default_port
host = connection.client.hostname or DEFAULT_HOST
logger.debug('Verify Consul connection to %s:%s', host, port)
try:
client = consul.Consul(host=host, port=int(port))
client.agent.self()
return True
except ValueError:
pass
return False
def driver_version(self):
return consul.__version__
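# Standalone sketch of the Consul locking primitive the Channel above relies on
# (added example), assuming a local Consul agent and the python-consul library.
# The key 'kombu/example.lock' is a hypothetical name.
def _example_lock_roundtrip():
    client = consul.Consul(host=DEFAULT_HOST, port=DEFAULT_PORT)
    session_id = client.session.create(name='example-lock', ttl=30)
    # The put only succeeds if no other session currently holds the key.
    acquired = client.kv.put(key='kombu/example.lock', value='owner',
                             acquire=session_id)
    if acquired:
        client.kv.delete(key='kombu/example.lock')
    client.session.destroy(session_id)
    return acquired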
|
from homeassistant.components.camera.const import DATA_CAMERA_PREFS, PREF_PRELOAD_STREAM
def mock_camera_prefs(hass, entity_id, prefs=None):
"""Fixture for cloud component."""
prefs_to_set = {PREF_PRELOAD_STREAM: True}
if prefs is not None:
prefs_to_set.update(prefs)
hass.data[DATA_CAMERA_PREFS]._prefs[entity_id] = prefs_to_set
return prefs_to_set
|
from __future__ import unicode_literals
try:
    # python 2
    from urllib import quote
except ImportError:
    # python 3
    from urllib.parse import quote
def url_encode(item):
"""url encode"""
    try:
        return quote(item.encode("utf-8"))
    except Exception:
        return ''
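# Usage note (added example): quote() percent-encodes the UTF-8 bytes, so a
# space becomes '%20' while '/' is left intact by default.
#
#     >>> url_encode("hello world")
#     'hello%20world'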
|
import pytest
from coverage.misc import CoverageException
from coverage.results import format_lines, Numbers, should_fail_under
from tests.coveragetest import CoverageTest
class NumbersTest(CoverageTest):
"""Tests for coverage.py's numeric measurement summaries."""
run_in_temp_dir = False
def test_basic(self):
n1 = Numbers(n_files=1, n_statements=200, n_missing=20)
self.assertEqual(n1.n_statements, 200)
self.assertEqual(n1.n_executed, 180)
self.assertEqual(n1.n_missing, 20)
self.assertEqual(n1.pc_covered, 90)
def test_addition(self):
n1 = Numbers(n_files=1, n_statements=200, n_missing=20)
n2 = Numbers(n_files=1, n_statements=10, n_missing=8)
n3 = n1 + n2
self.assertEqual(n3.n_files, 2)
self.assertEqual(n3.n_statements, 210)
self.assertEqual(n3.n_executed, 182)
self.assertEqual(n3.n_missing, 28)
self.assertAlmostEqual(n3.pc_covered, 86.666666666)
def test_sum(self):
n1 = Numbers(n_files=1, n_statements=200, n_missing=20)
n2 = Numbers(n_files=1, n_statements=10, n_missing=8)
n3 = sum([n1, n2])
self.assertEqual(n3.n_files, 2)
self.assertEqual(n3.n_statements, 210)
self.assertEqual(n3.n_executed, 182)
self.assertEqual(n3.n_missing, 28)
self.assertAlmostEqual(n3.pc_covered, 86.666666666)
def test_pc_covered_str(self):
# Numbers._precision is a global, which is bad.
Numbers.set_precision(0)
n0 = Numbers(n_files=1, n_statements=1000, n_missing=0)
n1 = Numbers(n_files=1, n_statements=1000, n_missing=1)
n999 = Numbers(n_files=1, n_statements=1000, n_missing=999)
n1000 = Numbers(n_files=1, n_statements=1000, n_missing=1000)
self.assertEqual(n0.pc_covered_str, "100")
self.assertEqual(n1.pc_covered_str, "99")
self.assertEqual(n999.pc_covered_str, "1")
self.assertEqual(n1000.pc_covered_str, "0")
def test_pc_covered_str_precision(self):
# Numbers._precision is a global, which is bad.
Numbers.set_precision(1)
n0 = Numbers(n_files=1, n_statements=10000, n_missing=0)
n1 = Numbers(n_files=1, n_statements=10000, n_missing=1)
n9999 = Numbers(n_files=1, n_statements=10000, n_missing=9999)
n10000 = Numbers(n_files=1, n_statements=10000, n_missing=10000)
self.assertEqual(n0.pc_covered_str, "100.0")
self.assertEqual(n1.pc_covered_str, "99.9")
self.assertEqual(n9999.pc_covered_str, "0.1")
self.assertEqual(n10000.pc_covered_str, "0.0")
Numbers.set_precision(0)
def test_covered_ratio(self):
n = Numbers(n_files=1, n_statements=200, n_missing=47)
self.assertEqual(n.ratio_covered, (153, 200))
n = Numbers(
n_files=1, n_statements=200, n_missing=47,
n_branches=10, n_missing_branches=3, n_partial_branches=1000,
)
self.assertEqual(n.ratio_covered, (160, 210))
@pytest.mark.parametrize("total, fail_under, precision, result", [
# fail_under==0 means anything is fine!
(0, 0, 0, False),
(0.001, 0, 0, False),
# very small fail_under is possible to fail.
(0.001, 0.01, 0, True),
# Rounding should work properly.
(42.1, 42, 0, False),
(42.1, 43, 0, True),
(42.857, 42, 0, False),
(42.857, 43, 0, False),
(42.857, 44, 0, True),
(42.857, 42.856, 3, False),
(42.857, 42.858, 3, True),
# If you don't specify precision, your fail-under is rounded.
(42.857, 42.856, 0, False),
# Values near 100 should only be treated as 100 if they are 100.
(99.8, 100, 0, True),
(100.0, 100, 0, False),
(99.8, 99.7, 1, False),
(99.88, 99.90, 2, True),
(99.999, 100, 1, True),
(99.999, 100, 2, True),
(99.999, 100, 3, True),
])
def test_should_fail_under(total, fail_under, precision, result):
assert should_fail_under(float(total), float(fail_under), precision) == result
def test_should_fail_under_invalid_value():
with pytest.raises(CoverageException, match=r"fail_under=101"):
should_fail_under(100.0, 101, 0)
@pytest.mark.parametrize("statements, lines, result", [
(set([1,2,3,4,5,10,11,12,13,14]), set([1,2,5,10,11,13,14]), "1-2, 5-11, 13-14"),
([1,2,3,4,5,10,11,12,13,14,98,99], [1,2,5,10,11,13,14,99], "1-2, 5-11, 13-14, 99"),
([1,2,3,4,98,99,100,101,102,103,104], [1,2,99,102,103,104], "1-2, 99, 102-104"),
([17], [17], "17"),
([90,91,92,93,94,95], [90,91,92,93,94,95], "90-95"),
([1, 2, 3, 4, 5], [], ""),
([1, 2, 3, 4, 5], [4], "4"),
])
def test_format_lines(statements, lines, result):
assert format_lines(statements, lines) == result
@pytest.mark.parametrize("statements, lines, arcs, result", [
(
set([1,2,3,4,5,10,11,12,13,14]),
set([1,2,5,10,11,13,14]),
(),
"1-2, 5-11, 13-14"
),
(
[1,2,3,4,5,10,11,12,13,14,98,99],
[1,2,5,10,11,13,14,99],
[(3, [4]), (5, [10, 11]), (98, [100, -1])],
"1-2, 3->4, 5-11, 13-14, 98->100, 98->exit, 99"
),
(
[1,2,3,4,98,99,100,101,102,103,104],
[1,2,99,102,103,104],
[(3, [4]), (104, [-1])],
"1-2, 3->4, 99, 102-104"
),
])
def test_format_lines_with_arcs(statements, lines, arcs, result):
assert format_lines(statements, lines, arcs) == result
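def test_format_lines_smoke():
    # Added minimal example: lines 3-4 are statements that were not hit, so the
    # hit lines 1-2 and 5 remain two separate groups.
    assert format_lines({1, 2, 3, 4, 5}, {1, 2, 5}) == "1-2, 5"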
|
import os.path as op
import numpy as np
from ...utils import logger
from .constants import CTF
def _make_ctf_name(directory, extra, raise_error=True):
"""Make a CTF name."""
fname = op.join(directory, op.basename(directory)[:-3] + '.' + extra)
if not op.isfile(fname):
if raise_error:
raise IOError('Standard file %s not found' % fname)
else:
return None
return fname
def _read_double(fid, n=1):
"""Read a double."""
return np.fromfile(fid, '>f8', n)
def _read_string(fid, n_bytes, decode=True):
"""Read string."""
s0 = fid.read(n_bytes)
s = s0.split(b'\x00')[0]
return s.decode('utf-8') if decode else s
def _read_ustring(fid, n_bytes):
"""Read unsigned character string."""
return np.fromfile(fid, '>B', n_bytes)
def _read_int2(fid):
"""Read int from short."""
return np.fromfile(fid, '>i2', 1)[0]
def _read_int(fid):
"""Read a 32-bit integer."""
return np.fromfile(fid, '>i4', 1)[0]
def _move_to_next(fid, byte=8):
"""Move to next byte boundary."""
now = fid.tell()
if now % byte != 0:
now = now - (now % byte) + byte
fid.seek(now, 0)
def _read_filter(fid):
"""Read filter information."""
f = dict()
f['freq'] = _read_double(fid)[0]
f['class'] = _read_int(fid)
f['type'] = _read_int(fid)
f['npar'] = _read_int2(fid)
f['pars'] = _read_double(fid, f['npar'])
return f
def _read_comp_coeff(fid, d):
"""Read compensation coefficients."""
# Read the coefficients and initialize
d['ncomp'] = _read_int2(fid)
d['comp'] = list()
# Read each record
dt = np.dtype([
('sensor_name', 'S32'),
('coeff_type', '>i4'), ('d0', '>i4'),
('ncoeff', '>i2'),
('sensors', 'S%s' % CTF.CTFV_SENSOR_LABEL, CTF.CTFV_MAX_BALANCING),
('coeffs', '>f8', CTF.CTFV_MAX_BALANCING)])
comps = np.fromfile(fid, dt, d['ncomp'])
for k in range(d['ncomp']):
comp = dict()
d['comp'].append(comp)
comp['sensor_name'] = \
comps['sensor_name'][k].split(b'\x00')[0].decode('utf-8')
comp['coeff_type'] = comps['coeff_type'][k]
comp['ncoeff'] = comps['ncoeff'][k]
comp['sensors'] = [s.split(b'\x00')[0].decode('utf-8')
for s in comps['sensors'][k][:comp['ncoeff']]]
comp['coeffs'] = comps['coeffs'][k][:comp['ncoeff']]
comp['scanno'] = d['ch_names'].index(comp['sensor_name'])
def _read_res4(dsdir):
"""Read the magical res4 file."""
# adapted from read_res4.c
name = _make_ctf_name(dsdir, 'res4')
res = dict()
with open(name, 'rb') as fid:
# Read the fields
res['head'] = _read_string(fid, 8)
res['appname'] = _read_string(fid, 256)
res['origin'] = _read_string(fid, 256)
res['desc'] = _read_string(fid, 256)
res['nave'] = _read_int2(fid)
res['data_time'] = _read_string(fid, 255)
res['data_date'] = _read_string(fid, 255)
# Seems that date and time can be swapped
# (are they entered manually?!)
if '/' in res['data_time'] and ':' in res['data_date']:
data_date = res['data_date']
res['data_date'] = res['data_time']
res['data_time'] = data_date
res['nsamp'] = _read_int(fid)
res['nchan'] = _read_int2(fid)
_move_to_next(fid, 8)
res['sfreq'] = _read_double(fid)[0]
res['epoch_time'] = _read_double(fid)[0]
res['no_trials'] = _read_int2(fid)
_move_to_next(fid, 4)
res['pre_trig_pts'] = _read_int(fid)
res['no_trials_done'] = _read_int2(fid)
        res['no_trials_display'] = _read_int2(fid)
_move_to_next(fid, 4)
res['save_trials'] = _read_int(fid)
res['primary_trigger'] = fid.read(1)
res['secondary_trigger'] = [fid.read(1)
for k in range(CTF.CTFV_MAX_AVERAGE_BINS)]
res['trigger_polarity_mask'] = fid.read(1)
res['trigger_mode'] = _read_int2(fid)
_move_to_next(fid, 4)
res['accept_reject'] = _read_int(fid)
        res['run_time_display'] = _read_int2(fid)
_move_to_next(fid, 4)
res['zero_head'] = _read_int(fid)
_move_to_next(fid, 4)
res['artifact_mode'] = _read_int(fid)
_read_int(fid) # padding
res['nf_run_name'] = _read_string(fid, 32)
res['nf_run_title'] = _read_string(fid, 256)
res['nf_instruments'] = _read_string(fid, 32)
res['nf_collect_descriptor'] = _read_string(fid, 32)
res['nf_subject_id'] = _read_string(fid, 32)
res['nf_operator'] = _read_string(fid, 32)
if len(res['nf_operator']) == 0:
res['nf_operator'] = None
res['nf_sensor_file_name'] = _read_ustring(fid, 60)
_move_to_next(fid, 4)
res['rdlen'] = _read_int(fid)
fid.seek(CTF.FUNNY_POS, 0)
if res['rdlen'] > 0:
res['run_desc'] = _read_string(fid, res['rdlen'])
# Filters
res['nfilt'] = _read_int2(fid)
res['filters'] = list()
for k in range(res['nfilt']):
res['filters'].append(_read_filter(fid))
# Channel information (names, then data)
res['ch_names'] = list()
for k in range(res['nchan']):
ch_name = _read_string(fid, 32)
res['ch_names'].append(ch_name)
_coil_dt = np.dtype([
('pos', '>f8', 3), ('d0', '>f8'),
('norm', '>f8', 3), ('d1', '>f8'),
('turns', '>i2'), ('d2', '>i4'), ('d3', '>i2'),
('area', '>f8')])
_ch_dt = np.dtype([
('sensor_type_index', '>i2'),
('original_run_no', '>i2'),
('coil_type', '>i4'),
('proper_gain', '>f8'),
('qgain', '>f8'),
('io_gain', '>f8'),
('io_offset', '>f8'),
('num_coils', '>i2'),
('grad_order_no', '>i2'), ('d0', '>i4'),
('coil', _coil_dt, CTF.CTFV_MAX_COILS),
('head_coil', _coil_dt, CTF.CTFV_MAX_COILS)])
chs = np.fromfile(fid, _ch_dt, res['nchan'])
for coil in (chs['coil'], chs['head_coil']):
coil['pos'] /= 100.
coil['area'] *= 1e-4
# convert to dict
chs = [dict(zip(chs.dtype.names, x)) for x in chs]
res['chs'] = chs
for k in range(res['nchan']):
res['chs'][k]['ch_name'] = res['ch_names'][k]
# The compensation coefficients
_read_comp_coeff(fid, res)
logger.info(' res4 data read.')
return res
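def _example_big_endian_read():
    """Added illustration (not part of the original reader): decode a
    big-endian int16 from raw bytes, mirroring the '>i2' convention used by
    _read_int2 above."""
    return int(np.frombuffer(b'\x00\x01', dtype='>i2')[0])  # -> 1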
|
from pytest import fixture
from . import _get_mock_cfupdate
from tests.async_mock import patch
@fixture
def cfupdate(hass):
"""Mock the CloudflareUpdater for easier testing."""
mock_cfupdate = _get_mock_cfupdate()
with patch(
"homeassistant.components.cloudflare.CloudflareUpdater",
return_value=mock_cfupdate,
) as mock_api:
yield mock_api
@fixture
def cfupdate_flow(hass):
"""Mock the CloudflareUpdater for easier config flow testing."""
mock_cfupdate = _get_mock_cfupdate()
with patch(
"homeassistant.components.cloudflare.config_flow.CloudflareUpdater",
return_value=mock_cfupdate,
) as mock_api:
yield mock_api
|
import copy
from datetime import timedelta
import logging
from pyfronius import Fronius
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_MONITORED_CONDITIONS,
CONF_RESOURCE,
CONF_SCAN_INTERVAL,
CONF_SENSOR_TYPE,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
CONF_SCOPE = "scope"
TYPE_INVERTER = "inverter"
TYPE_STORAGE = "storage"
TYPE_METER = "meter"
TYPE_POWER_FLOW = "power_flow"
SCOPE_DEVICE = "device"
SCOPE_SYSTEM = "system"
DEFAULT_SCOPE = SCOPE_DEVICE
DEFAULT_DEVICE = 0
DEFAULT_INVERTER = 1
DEFAULT_SCAN_INTERVAL = timedelta(seconds=60)
SENSOR_TYPES = [TYPE_INVERTER, TYPE_STORAGE, TYPE_METER, TYPE_POWER_FLOW]
SCOPE_TYPES = [SCOPE_DEVICE, SCOPE_SYSTEM]
def _device_id_validator(config):
"""Ensure that inverters have default id 1 and other devices 0."""
config = copy.deepcopy(config)
for cond in config[CONF_MONITORED_CONDITIONS]:
if CONF_DEVICE not in cond:
if cond[CONF_SENSOR_TYPE] == TYPE_INVERTER:
cond[CONF_DEVICE] = DEFAULT_INVERTER
else:
cond[CONF_DEVICE] = DEFAULT_DEVICE
return config
PLATFORM_SCHEMA = vol.Schema(
vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_RESOURCE): cv.url,
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_SENSOR_TYPE): vol.In(SENSOR_TYPES),
vol.Optional(CONF_SCOPE, default=DEFAULT_SCOPE): vol.In(
SCOPE_TYPES
),
vol.Optional(CONF_DEVICE): cv.positive_int,
}
],
),
}
),
_device_id_validator,
)
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up of Fronius platform."""
session = async_get_clientsession(hass)
fronius = Fronius(session, config[CONF_RESOURCE])
scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
adapters = []
# Creates all adapters for monitored conditions
for condition in config[CONF_MONITORED_CONDITIONS]:
device = condition[CONF_DEVICE]
sensor_type = condition[CONF_SENSOR_TYPE]
scope = condition[CONF_SCOPE]
name = f"Fronius {condition[CONF_SENSOR_TYPE].replace('_', ' ').capitalize()} {device if scope == SCOPE_DEVICE else SCOPE_SYSTEM} {config[CONF_RESOURCE]}"
if sensor_type == TYPE_INVERTER:
if scope == SCOPE_SYSTEM:
adapter_cls = FroniusInverterSystem
else:
adapter_cls = FroniusInverterDevice
elif sensor_type == TYPE_METER:
if scope == SCOPE_SYSTEM:
adapter_cls = FroniusMeterSystem
else:
adapter_cls = FroniusMeterDevice
elif sensor_type == TYPE_POWER_FLOW:
adapter_cls = FroniusPowerFlow
else:
adapter_cls = FroniusStorage
adapters.append(adapter_cls(fronius, name, device, async_add_entities))
    # Creates a closure that fetches an update when called
def adapter_data_fetcher(data_adapter):
async def fetch_data(*_):
await data_adapter.async_update()
return fetch_data
# Set up the fetching in a fixed interval for each adapter
for adapter in adapters:
fetch = adapter_data_fetcher(adapter)
# fetch data once at set-up
await fetch()
async_track_time_interval(hass, fetch, scan_interval)
class FroniusAdapter:
"""The Fronius sensor fetching component."""
def __init__(self, bridge, name, device, add_entities):
"""Initialize the sensor."""
self.bridge = bridge
self._name = name
self._device = device
self._fetched = {}
self.sensors = set()
self._registered_sensors = set()
self._add_entities = add_entities
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def data(self):
"""Return the state attributes."""
return self._fetched
async def async_update(self):
"""Retrieve and update latest state."""
values = {}
try:
values = await self._update()
except ConnectionError:
_LOGGER.error("Failed to update: connection error")
except ValueError:
_LOGGER.error(
"Failed to update: invalid response returned."
"Maybe the configured device is not supported"
)
if not values:
return
attributes = self._fetched
# Copy data of current fronius device
for key, entry in values.items():
# If the data is directly a sensor
if "value" in entry:
attributes[key] = entry
self._fetched = attributes
# Add discovered value fields as sensors
# because some fields are only sent temporarily
new_sensors = []
for key in attributes:
if key not in self.sensors:
self.sensors.add(key)
_LOGGER.info("Discovered %s, adding as sensor", key)
new_sensors.append(FroniusTemplateSensor(self, key))
self._add_entities(new_sensors, True)
# Schedule an update for all included sensors
for sensor in self._registered_sensors:
sensor.async_schedule_update_ha_state(True)
async def _update(self):
"""Return values of interest."""
async def register(self, sensor):
"""Register child sensor for update subscriptions."""
self._registered_sensors.add(sensor)
class FroniusInverterSystem(FroniusAdapter):
"""Adapter for the fronius inverter with system scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_system_inverter_data()
class FroniusInverterDevice(FroniusAdapter):
"""Adapter for the fronius inverter with device scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_inverter_data(self._device)
class FroniusStorage(FroniusAdapter):
"""Adapter for the fronius battery storage."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_storage_data(self._device)
class FroniusMeterSystem(FroniusAdapter):
"""Adapter for the fronius meter with system scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_system_meter_data()
class FroniusMeterDevice(FroniusAdapter):
"""Adapter for the fronius meter with device scope."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_meter_data(self._device)
class FroniusPowerFlow(FroniusAdapter):
"""Adapter for the fronius power flow."""
async def _update(self):
"""Get the values for the current state."""
return await self.bridge.current_power_flow()
class FroniusTemplateSensor(Entity):
"""Sensor for the single values (e.g. pv power, ac power)."""
def __init__(self, parent: FroniusAdapter, name):
"""Initialize a singular value sensor."""
self._name = name
self.parent = parent
self._state = None
self._unit = None
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name.replace('_', ' ').capitalize()} {self.parent.name}"
@property
def state(self):
"""Return the current state."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def should_poll(self):
"""Device should not be polled, returns False."""
return False
async def async_update(self):
"""Update the internal state."""
state = self.parent.data.get(self._name)
self._state = state.get("value")
self._unit = state.get("unit")
async def async_added_to_hass(self):
"""Register at parent component for updates."""
await self.parent.register(self)
def __hash__(self):
"""Hash sensor by hashing its name."""
return hash(self.name)
|
import collections
import logging
from satel_integra.satel_integra import AsyncSatel
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
DEFAULT_ALARM_NAME = "satel_integra"
DEFAULT_PORT = 7094
DEFAULT_CONF_ARM_HOME_MODE = 1
DEFAULT_DEVICE_PARTITION = 1
DEFAULT_ZONE_TYPE = "motion"
_LOGGER = logging.getLogger(__name__)
DOMAIN = "satel_integra"
DATA_SATEL = "satel_integra"
CONF_DEVICE_CODE = "code"
CONF_DEVICE_PARTITIONS = "partitions"
CONF_ARM_HOME_MODE = "arm_home_mode"
CONF_ZONE_NAME = "name"
CONF_ZONE_TYPE = "type"
CONF_ZONES = "zones"
CONF_OUTPUTS = "outputs"
CONF_SWITCHABLE_OUTPUTS = "switchable_outputs"
ZONES = "zones"
SIGNAL_PANEL_MESSAGE = "satel_integra.panel_message"
SIGNAL_PANEL_ARM_AWAY = "satel_integra.panel_arm_away"
SIGNAL_PANEL_ARM_HOME = "satel_integra.panel_arm_home"
SIGNAL_PANEL_DISARM = "satel_integra.panel_disarm"
SIGNAL_ZONES_UPDATED = "satel_integra.zones_updated"
SIGNAL_OUTPUTS_UPDATED = "satel_integra.outputs_updated"
ZONE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE_NAME): cv.string,
vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): cv.string,
}
)
EDITABLE_OUTPUT_SCHEMA = vol.Schema({vol.Required(CONF_ZONE_NAME): cv.string})
PARTITION_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE_NAME): cv.string,
vol.Optional(CONF_ARM_HOME_MODE, default=DEFAULT_CONF_ARM_HOME_MODE): vol.In(
[1, 2, 3]
),
}
)
def is_alarm_code_necessary(value):
"""Check if alarm code must be configured."""
if value.get(CONF_SWITCHABLE_OUTPUTS) and CONF_DEVICE_CODE not in value:
raise vol.Invalid("You need to specify alarm code to use switchable_outputs")
return value
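# Illustrative sketch of the rule enforced above (literal keys from the
# constants defined in this module):
#   {"switchable_outputs": {...}}                 -> raises vol.Invalid
#   {"switchable_outputs": {...}, "code": "1234"} -> returned unchanged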
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DEVICE_CODE): cv.string,
vol.Optional(CONF_DEVICE_PARTITIONS, default={}): {
vol.Coerce(int): PARTITION_SCHEMA
},
vol.Optional(CONF_ZONES, default={}): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_OUTPUTS, default={}): {vol.Coerce(int): ZONE_SCHEMA},
vol.Optional(CONF_SWITCHABLE_OUTPUTS, default={}): {
vol.Coerce(int): EDITABLE_OUTPUT_SCHEMA
},
},
is_alarm_code_necessary,
)
},
extra=vol.ALLOW_EXTRA,
)
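# A hedged example of YAML configuration CONFIG_SCHEMA is meant to accept
# (addresses and names are invented):
#
# satel_integra:
#   host: 192.168.1.100
#   port: 7094
#   code: "1234"
#   partitions:
#     1:
#       name: House
#       arm_home_mode: 2
#   zones:
#     1:
#       name: Entry door
#       type: opening
#   switchable_outputs:
#     5:
#       name: Gate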
async def async_setup(hass, config):
"""Set up the Satel Integra component."""
conf = config.get(DOMAIN)
zones = conf.get(CONF_ZONES)
outputs = conf.get(CONF_OUTPUTS)
switchable_outputs = conf.get(CONF_SWITCHABLE_OUTPUTS)
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
partitions = conf.get(CONF_DEVICE_PARTITIONS)
monitored_outputs = collections.OrderedDict(
list(outputs.items()) + list(switchable_outputs.items())
)
controller = AsyncSatel(host, port, hass.loop, zones, monitored_outputs, partitions)
hass.data[DATA_SATEL] = controller
result = await controller.connect()
if not result:
return False
@callback
def _close(*_):
controller.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)
_LOGGER.debug("Arm home config: %s, mode: %s ", conf, conf.get(CONF_ARM_HOME_MODE))
hass.async_create_task(
async_load_platform(hass, "alarm_control_panel", DOMAIN, conf, config)
)
hass.async_create_task(
async_load_platform(
hass,
"binary_sensor",
DOMAIN,
{CONF_ZONES: zones, CONF_OUTPUTS: outputs},
config,
)
)
hass.async_create_task(
async_load_platform(
hass,
"switch",
DOMAIN,
{
CONF_SWITCHABLE_OUTPUTS: switchable_outputs,
CONF_DEVICE_CODE: conf.get(CONF_DEVICE_CODE),
},
config,
)
)
@callback
def alarm_status_update_callback():
"""Send status update received from alarm to Home Assistant."""
_LOGGER.debug("Sending request to update panel state")
async_dispatcher_send(hass, SIGNAL_PANEL_MESSAGE)
@callback
def zones_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Zones callback, status: %s", status)
async_dispatcher_send(hass, SIGNAL_ZONES_UPDATED, status[ZONES])
@callback
def outputs_update_callback(status):
"""Update zone objects as per notification from the alarm."""
_LOGGER.debug("Outputs updated callback , status: %s", status)
async_dispatcher_send(hass, SIGNAL_OUTPUTS_UPDATED, status["outputs"])
# Create a task instead of adding a tracking job, since this task will
# run until the connection to satel_integra is closed.
hass.loop.create_task(controller.keep_alive())
hass.loop.create_task(
controller.monitor_status(
alarm_status_update_callback, zones_update_callback, outputs_update_callback
)
)
return True
|
from typing import List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
)
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from . import DOMAIN, const
ACTION_TYPES = {"set_hvac_mode", "set_preset_mode"}
SET_HVAC_MODE_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): "set_hvac_mode",
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
vol.Required(const.ATTR_HVAC_MODE): vol.In(const.HVAC_MODES),
}
)
SET_PRESET_MODE_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): "set_preset_mode",
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
vol.Required(const.ATTR_PRESET_MODE): str,
}
)
ACTION_SCHEMA = vol.Any(SET_HVAC_MODE_SCHEMA, SET_PRESET_MODE_SCHEMA)
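# Illustrative action configurations matched by ACTION_SCHEMA (ids invented):
#   {"device_id": "abc123", "domain": "climate", "entity_id": "climate.living_room",
#    "type": "set_hvac_mode", "hvac_mode": "heat"}
#   {"device_id": "abc123", "domain": "climate", "entity_id": "climate.living_room",
#    "type": "set_preset_mode", "preset_mode": "away"}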
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions for Climate devices."""
registry = await entity_registry.async_get_registry(hass)
actions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
# We need a state or else we can't populate the HVAC and preset modes.
if state is None:
continue
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "set_hvac_mode",
}
)
if state.attributes[ATTR_SUPPORTED_FEATURES] & const.SUPPORT_PRESET_MODE:
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "set_preset_mode",
}
)
return actions
async def async_call_action_from_config(
hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context]
) -> None:
"""Execute a device action."""
config = ACTION_SCHEMA(config)
service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]}
if config[CONF_TYPE] == "set_hvac_mode":
service = const.SERVICE_SET_HVAC_MODE
service_data[const.ATTR_HVAC_MODE] = config[const.ATTR_HVAC_MODE]
elif config[CONF_TYPE] == "set_preset_mode":
service = const.SERVICE_SET_PRESET_MODE
service_data[const.ATTR_PRESET_MODE] = config[const.ATTR_PRESET_MODE]
await hass.services.async_call(
DOMAIN, service, service_data, blocking=True, context=context
)
async def async_get_action_capabilities(hass, config):
"""List action capabilities."""
state = hass.states.get(config[CONF_ENTITY_ID])
action_type = config[CONF_TYPE]
fields = {}
if action_type == "set_hvac_mode":
hvac_modes = state.attributes[const.ATTR_HVAC_MODES] if state else []
fields[vol.Required(const.ATTR_HVAC_MODE)] = vol.In(hvac_modes)
elif action_type == "set_preset_mode":
if state:
preset_modes = state.attributes.get(const.ATTR_PRESET_MODES, [])
else:
preset_modes = []
fields[vol.Required(const.ATTR_PRESET_MODE)] = vol.In(preset_modes)
return {"extra_fields": vol.Schema(fields)}
|
import sys
import openrazer.client
device_manager = openrazer.client.DeviceManager()
keyboard = None
def on_off(value, true='On', false='Off'):
if value:
return true
else:
return false
for device in device_manager.devices:
if device.type == 'keyboard':
keyboard = device
break
else:
print("Could not find suitable keyboard", file=sys.stderr)
sys.exit(1)
print("Name: {0}".format(keyboard.name))
print("Type: {0}".format(keyboard.type))
print("Serial: {0}".format(keyboard.serial))
print("Firmware Version: {0}".format(keyboard.firmware_version))
print("------------------------")
print("Brightness: {0}".format(keyboard.brightness))
if keyboard.has("game_mode_led"):
print("Game Mode LED: {0}".format(on_off(keyboard.game_mode_led)))
if keyboard.has("macro_mode_led"):
macro_led = on_off(keyboard.macro_mode_led)
    macro_led_state = on_off(
        keyboard.macro_mode_led_effect == openrazer.client.constants.MACRO_LED_STATIC,
        true='Static', false='Blinking')
print("Macro Mode LED: {0} ({1})".format(macro_led, macro_led_state))
print("------------------------")
print("Capabilities:")
for key, value in sorted(keyboard.capabilities.items(), key=lambda x: x[0]):
print(" {0}: {1}".format(key, value))
|
from contextlib import contextmanager
from datetime import timedelta
import logging
import os
import time
from sqlalchemy.exc import OperationalError, SQLAlchemyError
import homeassistant.util.dt as dt_util
from .const import CONF_DB_INTEGRITY_CHECK, DATA_INSTANCE, SQLITE_URL_PREFIX
from .models import ALL_TABLES, process_timestamp
_LOGGER = logging.getLogger(__name__)
RETRIES = 3
QUERY_RETRY_WAIT = 0.1
SQLITE3_POSTFIXES = ["", "-wal", "-shm"]
# This is the maximum time after the recorder ends the session
# before we no longer consider startup to be a "restart" and we
# should do a check on the sqlite3 database.
MAX_RESTART_TIME = timedelta(minutes=10)
@contextmanager
def session_scope(*, hass=None, session=None):
"""Provide a transactional scope around a series of operations."""
if session is None and hass is not None:
session = hass.data[DATA_INSTANCE].get_session()
if session is None:
raise RuntimeError("Session required")
need_rollback = False
try:
yield session
if session.transaction:
need_rollback = True
session.commit()
except Exception as err:
_LOGGER.error("Error executing query: %s", err)
if need_rollback:
session.rollback()
raise
finally:
session.close()
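# Minimal usage sketch (assumes a recorder instance is stored in hass.data):
#   with session_scope(hass=hass) as session:
#       session.add(row)  # committed on success, rolled back and re-raised on error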
def commit(session, work):
"""Commit & retry work: Either a model or in a function."""
for _ in range(0, RETRIES):
try:
if callable(work):
work(session)
else:
session.add(work)
session.commit()
return True
except OperationalError as err:
_LOGGER.error("Error executing query: %s", err)
session.rollback()
time.sleep(QUERY_RETRY_WAIT)
return False
def execute(qry, to_native=False, validate_entity_ids=True):
"""Query the database and convert the objects to HA native form.
This method also retries a few times in the case of stale connections.
"""
for tryno in range(0, RETRIES):
try:
timer_start = time.perf_counter()
if to_native:
result = [
row
for row in (
row.to_native(validate_entity_id=validate_entity_ids)
for row in qry
)
if row is not None
]
else:
result = list(qry)
if _LOGGER.isEnabledFor(logging.DEBUG):
elapsed = time.perf_counter() - timer_start
if to_native:
_LOGGER.debug(
"converting %d rows to native objects took %fs",
len(result),
elapsed,
)
else:
_LOGGER.debug(
"querying %d rows took %fs",
len(result),
elapsed,
)
return result
except SQLAlchemyError as err:
_LOGGER.error("Error executing query: %s", err)
if tryno == RETRIES - 1:
raise
time.sleep(QUERY_RETRY_WAIT)
def validate_or_move_away_sqlite_database(dburl: str, db_integrity_check: bool) -> bool:
"""Ensure that the database is valid or move it away."""
dbpath = dburl[len(SQLITE_URL_PREFIX) :]
if not os.path.exists(dbpath):
# Database does not exist yet, this is OK
return True
if not validate_sqlite_database(dbpath, db_integrity_check):
_move_away_broken_database(dbpath)
return False
return True
def last_run_was_recently_clean(cursor):
"""Verify the last recorder run was recently clean."""
cursor.execute("SELECT end FROM recorder_runs ORDER BY start DESC LIMIT 1;")
end_time = cursor.fetchone()
if not end_time or not end_time[0]:
return False
last_run_end_time = process_timestamp(dt_util.parse_datetime(end_time[0]))
now = dt_util.utcnow()
_LOGGER.debug("The last run ended at: %s (now: %s)", last_run_end_time, now)
if last_run_end_time + MAX_RESTART_TIME < now:
return False
return True
def basic_sanity_check(cursor):
"""Check tables to make sure select does not fail."""
for table in ALL_TABLES:
cursor.execute(f"SELECT * FROM {table} LIMIT 1;") # sec: not injection
return True
def validate_sqlite_database(dbpath: str, db_integrity_check: bool) -> bool:
"""Run a quick check on an sqlite database to see if it is corrupt."""
import sqlite3 # pylint: disable=import-outside-toplevel
try:
conn = sqlite3.connect(dbpath)
run_checks_on_open_db(dbpath, conn.cursor(), db_integrity_check)
conn.close()
except sqlite3.DatabaseError:
_LOGGER.exception("The database at %s is corrupt or malformed.", dbpath)
return False
return True
def run_checks_on_open_db(dbpath, cursor, db_integrity_check):
"""Run checks that will generate a sqlite3 exception if there is corruption."""
if basic_sanity_check(cursor) and last_run_was_recently_clean(cursor):
_LOGGER.debug(
"The quick_check will be skipped as the system was restarted cleanly and passed the basic sanity check"
)
return
if not db_integrity_check:
        # Always warn so that, if the database does fail later, users remember
        # the integrity check was manually disabled
_LOGGER.warning(
"The quick_check on the sqlite3 database at %s was skipped because %s was disabled",
dbpath,
CONF_DB_INTEGRITY_CHECK,
)
return
_LOGGER.debug(
"A quick_check is being performed on the sqlite3 database at %s", dbpath
)
cursor.execute("PRAGMA QUICK_CHECK")
def _move_away_broken_database(dbfile: str) -> None:
"""Move away a broken sqlite3 database."""
isotime = dt_util.utcnow().isoformat()
corrupt_postfix = f".corrupt.{isotime}"
_LOGGER.error(
"The system will rename the corrupt database file %s to %s in order to allow startup to proceed",
dbfile,
f"{dbfile}{corrupt_postfix}",
)
for postfix in SQLITE3_POSTFIXES:
path = f"{dbfile}{postfix}"
if not os.path.exists(path):
continue
os.rename(path, f"{path}{corrupt_postfix}")
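# For example (illustrative only), with isotime "2021-01-01T00:00:00" the files
# "home-assistant_v2.db", "home-assistant_v2.db-wal" and "home-assistant_v2.db-shm"
# would become "home-assistant_v2.db.corrupt.2021-01-01T00:00:00" and so on.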
|
from datetime import datetime, timedelta
import logging
import os
from googleapiclient import discovery as google_discovery
import httplib2
from oauth2client.client import (
FlowExchangeError,
OAuth2DeviceCodeError,
OAuth2WebServerFlow,
)
from oauth2client.file import Storage
import voluptuous as vol
from voluptuous.error import Error as VoluptuousError
import yaml
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import track_time_change
from homeassistant.util import convert, dt
_LOGGER = logging.getLogger(__name__)
DOMAIN = "google"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CONF_TRACK_NEW = "track_new_calendar"
CONF_CAL_ID = "cal_id"
CONF_DEVICE_ID = "device_id"
CONF_NAME = "name"
CONF_ENTITIES = "entities"
CONF_TRACK = "track"
CONF_SEARCH = "search"
CONF_OFFSET = "offset"
CONF_IGNORE_AVAILABILITY = "ignore_availability"
CONF_MAX_RESULTS = "max_results"
DEFAULT_CONF_TRACK_NEW = True
DEFAULT_CONF_OFFSET = "!!"
EVENT_CALENDAR_ID = "calendar_id"
EVENT_DESCRIPTION = "description"
EVENT_END_CONF = "end"
EVENT_END_DATE = "end_date"
EVENT_END_DATETIME = "end_date_time"
EVENT_IN = "in"
EVENT_IN_DAYS = "days"
EVENT_IN_WEEKS = "weeks"
EVENT_START_CONF = "start"
EVENT_START_DATE = "start_date"
EVENT_START_DATETIME = "start_date_time"
EVENT_SUMMARY = "summary"
EVENT_TYPES_CONF = "event_types"
NOTIFICATION_ID = "google_calendar_notification"
NOTIFICATION_TITLE = "Google Calendar Setup"
GROUP_NAME_ALL_CALENDARS = "Google Calendar Sensors"
SERVICE_SCAN_CALENDARS = "scan_for_calendars"
SERVICE_FOUND_CALENDARS = "found_calendar"
SERVICE_ADD_EVENT = "add_event"
DATA_INDEX = "google_calendars"
YAML_DEVICES = f"{DOMAIN}_calendars.yaml"
SCOPES = "https://www.googleapis.com/auth/calendar"
TOKEN_FILE = f".{DOMAIN}.token"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_TRACK_NEW): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
_SINGLE_CALSEARCH_CONFIG = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Optional(CONF_IGNORE_AVAILABILITY, default=True): cv.boolean,
vol.Optional(CONF_OFFSET): cv.string,
vol.Optional(CONF_SEARCH): cv.string,
vol.Optional(CONF_TRACK): cv.boolean,
vol.Optional(CONF_MAX_RESULTS): cv.positive_int,
}
)
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_CAL_ID): cv.string,
vol.Required(CONF_ENTITIES, None): vol.All(
cv.ensure_list, [_SINGLE_CALSEARCH_CONFIG]
),
},
extra=vol.ALLOW_EXTRA,
)
_EVENT_IN_TYPES = vol.Schema(
{
vol.Exclusive(EVENT_IN_DAYS, EVENT_TYPES_CONF): cv.positive_int,
vol.Exclusive(EVENT_IN_WEEKS, EVENT_TYPES_CONF): cv.positive_int,
}
)
ADD_EVENT_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(EVENT_CALENDAR_ID): cv.string,
vol.Required(EVENT_SUMMARY): cv.string,
vol.Optional(EVENT_DESCRIPTION, default=""): cv.string,
vol.Exclusive(EVENT_START_DATE, EVENT_START_CONF): cv.date,
vol.Exclusive(EVENT_END_DATE, EVENT_END_CONF): cv.date,
vol.Exclusive(EVENT_START_DATETIME, EVENT_START_CONF): cv.datetime,
vol.Exclusive(EVENT_END_DATETIME, EVENT_END_CONF): cv.datetime,
vol.Exclusive(EVENT_IN, EVENT_START_CONF, EVENT_END_CONF): _EVENT_IN_TYPES,
}
)
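# Hedged examples of service data accepted by ADD_EVENT_SERVICE_SCHEMA
# (calendar id and values are invented; only one start/end style may be used):
#   {"calendar_id": "primary", "summary": "Dentist", "in": {"days": 2}}
#   {"calendar_id": "primary", "summary": "Trip",
#    "start_date": "2021-06-01", "end_date": "2021-06-05"}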
def do_authentication(hass, hass_config, config):
"""Notify user of actions and authenticate.
Notify user of user_code and verification_url then poll
until we have an access token.
"""
oauth = OAuth2WebServerFlow(
client_id=config[CONF_CLIENT_ID],
client_secret=config[CONF_CLIENT_SECRET],
scope="https://www.googleapis.com/auth/calendar",
redirect_uri="Home-Assistant.io",
)
try:
dev_flow = oauth.step1_get_device_and_user_codes()
except OAuth2DeviceCodeError as err:
hass.components.persistent_notification.create(
f"Error: {err}<br />You will need to restart hass after fixing." "",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
return False
hass.components.persistent_notification.create(
(
f"In order to authorize Home-Assistant to view your calendars "
f'you must visit: <a href="{dev_flow.verification_url}" target="_blank">{dev_flow.verification_url}</a> and enter '
f"code: {dev_flow.user_code}"
),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
def step2_exchange(now):
"""Keep trying to validate the user_code until it expires."""
if now >= dt.as_local(dev_flow.user_code_expiry):
hass.components.persistent_notification.create(
"Authentication code expired, please restart "
"Home-Assistant and try again",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
listener()
try:
credentials = oauth.step2_exchange(device_flow_info=dev_flow)
except FlowExchangeError:
# not ready yet, call again
return
storage = Storage(hass.config.path(TOKEN_FILE))
storage.put(credentials)
do_setup(hass, hass_config, config)
listener()
hass.components.persistent_notification.create(
(
f"We are all setup now. Check {YAML_DEVICES} for calendars that have "
f"been found"
),
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
listener = track_time_change(
hass, step2_exchange, second=range(0, 60, dev_flow.interval)
)
return True
def setup(hass, config):
"""Set up the Google platform."""
if DATA_INDEX not in hass.data:
hass.data[DATA_INDEX] = {}
conf = config.get(DOMAIN, {})
if not conf:
# component is set up by tts platform
return True
token_file = hass.config.path(TOKEN_FILE)
if not os.path.isfile(token_file):
do_authentication(hass, config, conf)
else:
if not check_correct_scopes(token_file):
do_authentication(hass, config, conf)
else:
do_setup(hass, config, conf)
return True
def check_correct_scopes(token_file):
"""Check for the correct scopes in file."""
    with open(token_file) as tokenfile_handle:
        tokenfile = tokenfile_handle.read()
if "readonly" in tokenfile:
_LOGGER.warning("Please re-authenticate with Google")
return False
return True
def setup_services(hass, hass_config, track_new_found_calendars, calendar_service):
"""Set up the service listeners."""
def _found_calendar(call):
"""Check if we know about a calendar and generate PLATFORM_DISCOVER."""
calendar = get_calendar_info(hass, call.data)
if hass.data[DATA_INDEX].get(calendar[CONF_CAL_ID]) is not None:
return
hass.data[DATA_INDEX].update({calendar[CONF_CAL_ID]: calendar})
update_config(
hass.config.path(YAML_DEVICES), hass.data[DATA_INDEX][calendar[CONF_CAL_ID]]
)
discovery.load_platform(
hass,
"calendar",
DOMAIN,
hass.data[DATA_INDEX][calendar[CONF_CAL_ID]],
hass_config,
)
hass.services.register(DOMAIN, SERVICE_FOUND_CALENDARS, _found_calendar)
def _scan_for_calendars(service):
"""Scan for new calendars."""
service = calendar_service.get()
cal_list = service.calendarList()
calendars = cal_list.list().execute()["items"]
for calendar in calendars:
calendar["track"] = track_new_found_calendars
hass.services.call(DOMAIN, SERVICE_FOUND_CALENDARS, calendar)
hass.services.register(DOMAIN, SERVICE_SCAN_CALENDARS, _scan_for_calendars)
def _add_event(call):
"""Add a new event to calendar."""
service = calendar_service.get()
start = {}
end = {}
if EVENT_IN in call.data:
if EVENT_IN_DAYS in call.data[EVENT_IN]:
now = datetime.now()
start_in = now + timedelta(days=call.data[EVENT_IN][EVENT_IN_DAYS])
end_in = start_in + timedelta(days=1)
start = {"date": start_in.strftime("%Y-%m-%d")}
end = {"date": end_in.strftime("%Y-%m-%d")}
elif EVENT_IN_WEEKS in call.data[EVENT_IN]:
now = datetime.now()
start_in = now + timedelta(weeks=call.data[EVENT_IN][EVENT_IN_WEEKS])
end_in = start_in + timedelta(days=1)
start = {"date": start_in.strftime("%Y-%m-%d")}
end = {"date": end_in.strftime("%Y-%m-%d")}
elif EVENT_START_DATE in call.data:
start = {"date": str(call.data[EVENT_START_DATE])}
end = {"date": str(call.data[EVENT_END_DATE])}
elif EVENT_START_DATETIME in call.data:
start_dt = str(
call.data[EVENT_START_DATETIME].strftime("%Y-%m-%dT%H:%M:%S")
)
end_dt = str(call.data[EVENT_END_DATETIME].strftime("%Y-%m-%dT%H:%M:%S"))
start = {"dateTime": start_dt, "timeZone": str(hass.config.time_zone)}
end = {"dateTime": end_dt, "timeZone": str(hass.config.time_zone)}
event = {
"summary": call.data[EVENT_SUMMARY],
"description": call.data[EVENT_DESCRIPTION],
"start": start,
"end": end,
}
service_data = {"calendarId": call.data[EVENT_CALENDAR_ID], "body": event}
event = service.events().insert(**service_data).execute()
hass.services.register(
DOMAIN, SERVICE_ADD_EVENT, _add_event, schema=ADD_EVENT_SERVICE_SCHEMA
)
return True
def do_setup(hass, hass_config, config):
"""Run the setup after we have everything configured."""
# Load calendars the user has configured
hass.data[DATA_INDEX] = load_config(hass.config.path(YAML_DEVICES))
calendar_service = GoogleCalendarService(hass.config.path(TOKEN_FILE))
track_new_found_calendars = convert(
config.get(CONF_TRACK_NEW), bool, DEFAULT_CONF_TRACK_NEW
)
setup_services(hass, hass_config, track_new_found_calendars, calendar_service)
for calendar in hass.data[DATA_INDEX].values():
discovery.load_platform(hass, "calendar", DOMAIN, calendar, hass_config)
# Look for any new calendars
hass.services.call(DOMAIN, SERVICE_SCAN_CALENDARS, None)
return True
class GoogleCalendarService:
"""Calendar service interface to Google."""
def __init__(self, token_file):
"""Init the Google Calendar service."""
self.token_file = token_file
def get(self):
"""Get the calendar service from the storage file token."""
credentials = Storage(self.token_file).get()
http = credentials.authorize(httplib2.Http())
service = google_discovery.build(
"calendar", "v3", http=http, cache_discovery=False
)
return service
def get_calendar_info(hass, calendar):
"""Convert data from Google into DEVICE_SCHEMA."""
calendar_info = DEVICE_SCHEMA(
{
CONF_CAL_ID: calendar["id"],
CONF_ENTITIES: [
{
CONF_TRACK: calendar["track"],
CONF_NAME: calendar["summary"],
CONF_DEVICE_ID: generate_entity_id(
"{}", calendar["summary"], hass=hass
),
}
],
}
)
return calendar_info
def load_config(path):
"""Load the google_calendar_devices.yaml."""
calendars = {}
try:
with open(path) as file:
data = yaml.safe_load(file)
for calendar in data:
try:
calendars.update({calendar[CONF_CAL_ID]: DEVICE_SCHEMA(calendar)})
except VoluptuousError as exception:
# keep going
_LOGGER.warning("Calendar Invalid Data: %s", exception)
except FileNotFoundError:
        # The YAML file does not exist yet, so there is nothing to load
return {}
return calendars
def update_config(path, calendar):
"""Write the google_calendar_devices.yaml."""
with open(path, "a") as out:
out.write("\n")
yaml.dump([calendar], out, default_flow_style=False)
|
import pytest
import voluptuous as vol
from homeassistant.components.broadlink.helpers import data_packet, mac_address
async def test_padding(hass):
"""Verify that non padding strings are allowed."""
assert data_packet("Jg") == b"&"
assert data_packet("Jg=") == b"&"
assert data_packet("Jg==") == b"&"
async def test_valid_mac_address(hass):
"""Test we convert a valid MAC address to bytes."""
valid = [
"A1B2C3D4E5F6",
"a1b2c3d4e5f6",
"A1B2-C3D4-E5F6",
"a1b2-c3d4-e5f6",
"A1B2.C3D4.E5F6",
"a1b2.c3d4.e5f6",
"A1-B2-C3-D4-E5-F6",
"a1-b2-c3-d4-e5-f6",
"A1:B2:C3:D4:E5:F6",
"a1:b2:c3:d4:e5:f6",
]
for mac in valid:
assert mac_address(mac) == b"\xa1\xb2\xc3\xd4\xe5\xf6"
async def test_invalid_mac_address(hass):
"""Test we do not accept an invalid MAC address."""
invalid = [
None,
123,
["a", "b", "c"],
{"abc": "def"},
"a1b2c3d4e5f",
"a1b2.c3d4.e5f",
"a1-b2-c3-d4-e5-f",
"a1b2c3d4e5f66",
"a1b2.c3d4.e5f66",
"a1-b2-c3-d4-e5-f66",
"a1b2c3d4e5fg",
"a1b2.c3d4.e5fg",
"a1-b2-c3-d4-e5-fg",
"a1b.2c3d4.e5fg",
"a1b-2-c3-d4-e5-fg",
]
for mac in invalid:
with pytest.raises((ValueError, vol.Invalid)):
mac_address(mac)
|
from toonapi import Agreement, ToonError
from homeassistant import data_entry_flow
from homeassistant.components.toon.const import CONF_AGREEMENT, CONF_MIGRATE, DOMAIN
from homeassistant.config import async_process_ha_core_config
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def setup_component(hass):
"""Set up Toon component."""
await async_process_ha_core_config(
hass,
{"external_url": "https://example.com"},
)
with patch("os.path.isfile", return_value=False):
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"}},
)
await hass.async_block_till_done()
async def test_abort_if_no_configuration(hass):
"""Test abort if no app is configured."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
async def test_full_flow_implementation(
hass, aiohttp_client, aioclient_mock, current_request
):
"""Test registering an integration and finishing flow works."""
await setup_component(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pick_implementation"
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"implementation": "eneco"}
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result2["url"] == (
"https://api.toon.eu/authorize"
"?response_type=code&client_id=client"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
"&tenant_id=eneco&issuer=identity.toon.eu"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
"https://api.toon.eu/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("toonapi.Toon.agreements", return_value=[Agreement(agreement_id=123)]):
result3 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result3["data"]["auth_implementation"] == "eneco"
assert result3["data"]["agreement_id"] == 123
result3["data"]["token"].pop("expires_at")
assert result3["data"]["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
async def test_no_agreements(hass, aiohttp_client, aioclient_mock, current_request):
"""Test abort when there are no displays."""
await setup_component(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
await hass.config_entries.flow.async_configure(
result["flow_id"], {"implementation": "eneco"}
)
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://api.toon.eu/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("toonapi.Toon.agreements", return_value=[]):
result3 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result3["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result3["reason"] == "no_agreements"
async def test_multiple_agreements(
hass, aiohttp_client, aioclient_mock, current_request
):
"""Test abort when there are no displays."""
await setup_component(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
await hass.config_entries.flow.async_configure(
result["flow_id"], {"implementation": "eneco"}
)
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://api.toon.eu/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"toonapi.Toon.agreements",
return_value=[Agreement(agreement_id=1), Agreement(agreement_id=2)],
):
result3 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result3["step_id"] == "agreement"
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_AGREEMENT: "None None, None"}
)
assert result4["data"]["auth_implementation"] == "eneco"
assert result4["data"]["agreement_id"] == 1
async def test_agreement_already_set_up(
hass, aiohttp_client, aioclient_mock, current_request
):
"""Test showing display form again if display already exists."""
await setup_component(hass)
MockConfigEntry(domain=DOMAIN, unique_id=123).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
await hass.config_entries.flow.async_configure(
result["flow_id"], {"implementation": "eneco"}
)
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://api.toon.eu/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("toonapi.Toon.agreements", return_value=[Agreement(agreement_id=123)]):
result3 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result3["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result3["reason"] == "already_configured"
async def test_toon_abort(hass, aiohttp_client, aioclient_mock, current_request):
"""Test we abort on Toon error."""
await setup_component(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
await hass.config_entries.flow.async_configure(
result["flow_id"], {"implementation": "eneco"}
)
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://api.toon.eu/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("toonapi.Toon.agreements", side_effect=ToonError):
result2 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result2["reason"] == "connection_error"
async def test_import(hass):
"""Test if importing step works."""
await setup_component(hass)
    # Setting up the component without entries should already have triggered
    # an import flow. Hence, expect this one to abort with already_in_progress.
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_in_progress"
async def test_import_migration(hass, aiohttp_client, aioclient_mock, current_request):
"""Test if importing step with migration works."""
old_entry = MockConfigEntry(domain=DOMAIN, unique_id=123, version=1)
old_entry.add_to_hass(hass)
await setup_component(hass)
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].version == 1
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]["context"][CONF_MIGRATE] == old_entry.entry_id
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": flows[0]["flow_id"]})
await hass.config_entries.flow.async_configure(
flows[0]["flow_id"], {"implementation": "eneco"}
)
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://api.toon.eu/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("toonapi.Toon.agreements", return_value=[Agreement(agreement_id=123)]):
result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].version == 2
|
from qutebrowser.api import cmdutils, apitypes
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def scroll_px(tab: apitypes.Tab, dx: int, dy: int, count: int = 1) -> None:
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
tab.scroller.delta(dx, dy)
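# Illustrative use: bound to a key, `:scroll-px 0 200` scrolls 200 pixels down,
# and a count prefix multiplies it, e.g. `5:scroll-px 0 200` for 1000 pixels.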
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
def scroll(tab: apitypes.Tab, direction: str, count: int = 1) -> None:
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdutils.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
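# Illustrative use: `:scroll down` scrolls one step; for a larger increment the
# docstring suggests `:run-with-count`, e.g. `:run-with-count 5 scroll down`.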
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
@cmdutils.argument('count', value=cmdutils.Value.count)
@cmdutils.argument('horizontal', flag='x')
def scroll_to_perc(tab: apitypes.Tab, count: int = None,
perc: float = None, horizontal: bool = False) -> None:
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
tab.scroller.before_jump_requested.emit()
tab.scroller.to_perc(x, y)
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def scroll_to_anchor(tab: apitypes.Tab, name: str) -> None:
"""Scroll to the given anchor in the document.
Args:
name: The anchor to scroll to.
"""
tab.scroller.before_jump_requested.emit()
tab.scroller.to_anchor(name)
|
from typing import List, Optional
from pymelcloud import DEVICE_TYPE_ATW, AtwDevice
from pymelcloud.atw_device import (
PROPERTY_OPERATION_MODE,
PROPERTY_TARGET_TANK_TEMPERATURE,
)
from pymelcloud.device import PROPERTY_POWER
from homeassistant.components.water_heater import (
SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE,
WaterHeaterEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN, MelCloudDevice
from .const import ATTR_STATUS
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
):
"""Set up MelCloud device climate based on config_entry."""
mel_devices = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
AtwWaterHeater(mel_device, mel_device.device)
for mel_device in mel_devices[DEVICE_TYPE_ATW]
],
True,
)
class AtwWaterHeater(WaterHeaterEntity):
"""Air-to-Water water heater."""
def __init__(self, api: MelCloudDevice, device: AtwDevice) -> None:
"""Initialize water heater device."""
self._api = api
self._device = device
self._name = device.name
async def async_update(self):
"""Update state from MELCloud."""
await self._api.async_update()
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return f"{self._api.device.serial}"
@property
def name(self):
"""Return the display name of this entity."""
return self._name
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
async def async_turn_on(self) -> None:
"""Turn the entity on."""
await self._device.set({PROPERTY_POWER: True})
async def async_turn_off(self) -> None:
"""Turn the entity off."""
await self._device.set({PROPERTY_POWER: False})
@property
def device_state_attributes(self):
"""Return the optional state attributes with device specific additions."""
data = {ATTR_STATUS: self._device.status}
return data
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
return TEMP_CELSIUS
@property
def current_operation(self) -> Optional[str]:
"""Return current operation as reported by pymelcloud."""
return self._device.operation_mode
@property
def operation_list(self) -> List[str]:
"""Return the list of available operation modes as reported by pymelcloud."""
return self._device.operation_modes
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._device.tank_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._device.target_tank_temperature
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
await self._device.set(
{
PROPERTY_TARGET_TANK_TEMPERATURE: kwargs.get(
"temperature", self.target_temperature
)
}
)
async def async_set_operation_mode(self, operation_mode):
"""Set new target operation mode."""
await self._device.set({PROPERTY_OPERATION_MODE: operation_mode})
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE
@property
def min_temp(self) -> Optional[float]:
"""Return the minimum temperature."""
return self._device.target_tank_temperature_min
@property
def max_temp(self) -> Optional[float]:
"""Return the maximum temperature."""
return self._device.target_tank_temperature_max
|
import pifacedigitalio as PFIO
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
DOMAIN = "rpi_pfio"
DATA_PFIO_LISTENER = "pfio_listener"
def setup(hass, config):
"""Set up the Raspberry PI PFIO component."""
pifacedigital = PFIO.PiFaceDigital()
hass.data[DATA_PFIO_LISTENER] = PFIO.InputEventListener(chip=pifacedigital)
def cleanup_pfio(event):
"""Stuff to do before stopping."""
PFIO.deinit()
def prepare_pfio(event):
"""Stuff to do when Home Assistant starts."""
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_pfio)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, prepare_pfio)
PFIO.init()
return True
def write_output(port, value):
"""Write a value to a PFIO."""
PFIO.digital_write(port, value)
def read_input(port):
"""Read a value from a PFIO."""
return PFIO.digital_read(port)
def edge_detect(hass, port, event_callback, settle):
"""Add detection for RISING and FALLING events."""
hass.data[DATA_PFIO_LISTENER].register(
port, PFIO.IODIR_BOTH, event_callback, settle_time=settle
)
def activate_listener(hass):
"""Activate the registered listener events."""
hass.data[DATA_PFIO_LISTENER].activate()
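# Sketch of how a platform might use these helpers (port numbers and the
# handle_edge callback are invented for illustration):
#   write_output(0, 1)                       # drive output port 0 high
#   value = read_input(2)                    # read input port 2
#   edge_detect(hass, 2, handle_edge, 0.02)  # watch port 2 with a 20 ms settle time
#   activate_listener(hass)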
|
import logging
from typing import Any, Callable
from homeassistant.core import HassJob, callback
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.logging import catch_log_exception
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
DATA_DISPATCHER = "dispatcher"
@bind_hass
def dispatcher_connect(
hass: HomeAssistantType, signal: str, target: Callable[..., None]
) -> Callable[[], None]:
"""Connect a callable function to a signal."""
async_unsub = run_callback_threadsafe(
hass.loop, async_dispatcher_connect, hass, signal, target
).result()
def remove_dispatcher() -> None:
"""Remove signal listener."""
run_callback_threadsafe(hass.loop, async_unsub).result()
return remove_dispatcher
@callback
@bind_hass
def async_dispatcher_connect(
hass: HomeAssistantType, signal: str, target: Callable[..., Any]
) -> Callable[[], None]:
"""Connect a callable function to a signal.
This method must be run in the event loop.
"""
if DATA_DISPATCHER not in hass.data:
hass.data[DATA_DISPATCHER] = {}
job = HassJob(
catch_log_exception(
target,
lambda *args: "Exception in {} when dispatching '{}': {}".format(
# Functions wrapped in partial do not have a __name__
getattr(target, "__name__", None) or str(target),
signal,
args,
),
)
)
hass.data[DATA_DISPATCHER].setdefault(signal, []).append(job)
@callback
def async_remove_dispatcher() -> None:
"""Remove signal listener."""
try:
hass.data[DATA_DISPATCHER][signal].remove(job)
except (KeyError, ValueError):
            # KeyError if no listeners were registered for the signal
            # ValueError if this listener was not registered for the signal
_LOGGER.warning("Unable to remove unknown dispatcher %s", target)
return async_remove_dispatcher
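# Minimal usage sketch (signal name and handler are illustrative):
#   unsub = async_dispatcher_connect(hass, "my_signal", handle_update)
#   async_dispatcher_send(hass, "my_signal", payload)
#   unsub()  # stop listening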
@bind_hass
def dispatcher_send(hass: HomeAssistantType, signal: str, *args: Any) -> None:
"""Send signal and data."""
hass.loop.call_soon_threadsafe(async_dispatcher_send, hass, signal, *args)
@callback
@bind_hass
def async_dispatcher_send(hass: HomeAssistantType, signal: str, *args: Any) -> None:
"""Send signal and data.
This method must be run in the event loop.
"""
target_list = hass.data.get(DATA_DISPATCHER, {}).get(signal, [])
for job in target_list:
hass.async_add_hass_job(job, *args)
|
from datetime import timedelta
import arrow
from moto import mock_ses
from lemur.tests.factories import NotificationFactory, CertificateFactory
from lemur.tests.test_messaging import verify_sender_email
def test_formatting(certificate):
from lemur.plugins.lemur_slack.plugin import create_expiration_attachments
from lemur.certificates.schemas import certificate_notification_output_schema
data = [certificate_notification_output_schema.dump(certificate).data]
attachment = {
"title": certificate.name,
"color": "danger",
"fields": [
{"short": True, "value": "joe@example.com", "title": "Owner"},
{"short": True, "value": u"Tuesday, December 31, 2047", "title": "Expires"},
{"short": True, "value": 0, "title": "Endpoints Detected"},
],
"title_link": "https://lemur.example.com/#/certificates/{name}".format(
name=certificate.name
),
"mrkdwn_in": ["text"],
"text": "",
"fallback": "",
}
assert attachment == create_expiration_attachments(data)[0]
def get_options():
return [
{"name": "interval", "value": 10},
{"name": "unit", "value": "days"},
{"name": "webhook", "value": "https://slack.com/api/api.test"},
]
@mock_ses() # because email notifications are also sent
def test_send_expiration_notification():
from lemur.notifications.messaging import send_expiration_notifications
verify_sender_email() # emails are sent to owner and security; Slack only used for configured notification
notification = NotificationFactory(plugin_name="slack-notification")
notification.options = get_options()
now = arrow.utcnow()
in_ten_days = now + timedelta(days=10, hours=1) # a bit more than 10 days since we'll check in the future
certificate = CertificateFactory()
certificate.not_after = in_ten_days
certificate.notifications.append(notification)
assert send_expiration_notifications([]) == (3, 0) # owner, Slack, and security
# Currently disabled as the Slack plugin doesn't support this type of notification
# def test_send_rotation_notification(endpoint, source_plugin):
# from lemur.notifications.messaging import send_rotation_notification
# from lemur.deployment.service import rotate_certificate
#
# notification = NotificationFactory(plugin_name="slack-notification")
# notification.options = get_options()
#
# new_certificate = CertificateFactory()
# rotate_certificate(endpoint, new_certificate)
# assert endpoint.certificate == new_certificate
#
# assert send_rotation_notification(new_certificate, notification_plugin=notification.plugin)
# Currently disabled as the Slack plugin doesn't support this type of notification
# def test_send_pending_failure_notification(user, pending_certificate, async_issuer_plugin):
# from lemur.notifications.messaging import send_pending_failure_notification
#
# assert send_pending_failure_notification(pending_certificate, notification_plugin=plugins.get("slack-notification"))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import tensorflow as tf
from datasets import dataset_utils
# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
# The number of images in the validation set.
_NUM_VALIDATION = 350
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_jpeg(sess, image_data)
return image.shape[0], image.shape[1]
def decode_jpeg(self, sess, image_data):
image = sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
    A list of image file paths rooted at `dataset_dir`, and the sorted list of
    subdirectory names, which serve as the class names.
"""
flower_root = os.path.join(dataset_dir, 'flower_photos')
directories = []
class_names = []
for filename in os.listdir(flower_root):
path = os.path.join(flower_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'flowers_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
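# For example, with _NUM_SHARDS == 5:
#   _get_dataset_filename('/tmp/flowers', 'train', 0)
#   -> '/tmp/flowers/flowers_train_00000-of-00005.tfrecord'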
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
            image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, 'jpg', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'flower_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the download and conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
training_filenames = photo_filenames[_NUM_VALIDATION:]
validation_filenames = photo_filenames[:_NUM_VALIDATION]
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the Flowers dataset!')
|
import asyncio
from typing import Optional
from aioesphomeapi import CameraInfo, CameraState
from homeassistant.components import camera
from homeassistant.components.camera import Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import EsphomeEntity, platform_async_setup_entry
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up esphome cameras based on a config entry."""
await platform_async_setup_entry(
hass,
entry,
async_add_entities,
component_key="camera",
info_type=CameraInfo,
entity_type=EsphomeCamera,
state_type=CameraState,
)
class EsphomeCamera(Camera, EsphomeEntity):
"""A camera implementation for ESPHome."""
def __init__(self, entry_id: str, component_key: str, key: int):
"""Initialize."""
Camera.__init__(self)
EsphomeEntity.__init__(self, entry_id, component_key, key)
self._image_cond = asyncio.Condition()
@property
def _static_info(self) -> CameraInfo:
return super()._static_info
@property
def _state(self) -> Optional[CameraState]:
return super()._state
async def _on_state_update(self) -> None:
"""Notify listeners of new image when update arrives."""
await super()._on_state_update()
async with self._image_cond:
self._image_cond.notify_all()
async def async_camera_image(self) -> Optional[bytes]:
"""Return single camera image bytes."""
if not self.available:
return None
await self._client.request_single_image()
async with self._image_cond:
await self._image_cond.wait()
if not self.available:
return None
return self._state.image[:]
async def _async_camera_stream_image(self) -> Optional[bytes]:
"""Return a single camera image in a stream."""
if not self.available:
return None
await self._client.request_image_stream()
async with self._image_cond:
await self._image_cond.wait()
if not self.available:
return None
return self._state.image[:]
async def handle_async_mjpeg_stream(self, request):
"""Serve an HTTP MJPEG stream from the camera."""
return await camera.async_get_still_stream(
request, self._async_camera_stream_image, camera.DEFAULT_CONTENT_TYPE, 0.0
)
|
import asyncio
import logging
import urllib
from urllib.error import HTTPError
from aiohttp import web
from doorbirdpy import DoorBird
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICES,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from homeassistant.util import dt as dt_util, slugify
from .const import (
CONF_EVENTS,
DOMAIN,
DOOR_STATION,
DOOR_STATION_EVENT_ENTITY_IDS,
DOOR_STATION_INFO,
PLATFORMS,
)
from .util import get_doorstation_by_token
_LOGGER = logging.getLogger(__name__)
API_URL = f"/api/{DOMAIN}"
CONF_CUSTOM_URL = "hass_url_override"
RESET_DEVICE_FAVORITES = "doorbird_reset_favorites"
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_EVENTS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_CUSTOM_URL): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_DEVICES): vol.All(cv.ensure_list, [DEVICE_SCHEMA])}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the DoorBird component."""
hass.data.setdefault(DOMAIN, {})
# Provide an endpoint for the doorstations to call to trigger events
hass.http.register_view(DoorBirdRequestView)
if DOMAIN in config and CONF_DEVICES in config[DOMAIN]:
for index, doorstation_config in enumerate(config[DOMAIN][CONF_DEVICES]):
if CONF_NAME not in doorstation_config:
doorstation_config[CONF_NAME] = f"DoorBird {index + 1}"
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=doorstation_config
)
)
def _reset_device_favorites_handler(event):
"""Handle clearing favorites on device."""
token = event.data.get("token")
if token is None:
return
doorstation = get_doorstation_by_token(hass, token)
if doorstation is None:
_LOGGER.error("Device not found for provided token")
return
# Clear webhooks
favorites = doorstation.device.favorites()
for favorite_type in favorites:
for favorite_id in favorites[favorite_type]:
doorstation.device.delete_favorite(favorite_type, favorite_id)
hass.bus.async_listen(RESET_DEVICE_FAVORITES, _reset_device_favorites_handler)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up DoorBird from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
doorstation_config = entry.data
doorstation_options = entry.options
config_entry_id = entry.entry_id
device_ip = doorstation_config[CONF_HOST]
username = doorstation_config[CONF_USERNAME]
password = doorstation_config[CONF_PASSWORD]
device = DoorBird(device_ip, username, password)
try:
status = await hass.async_add_executor_job(device.ready)
info = await hass.async_add_executor_job(device.info)
except urllib.error.HTTPError as err:
if err.code == HTTP_UNAUTHORIZED:
_LOGGER.error(
"Authorization rejected by DoorBird for %s@%s", username, device_ip
)
return False
raise ConfigEntryNotReady from err
except OSError as oserr:
        _LOGGER.error("Failed to set up DoorBird at %s: %s", device_ip, oserr)
raise ConfigEntryNotReady from oserr
if not status[0]:
_LOGGER.error(
"Could not connect to DoorBird as %s@%s: Error %s",
username,
device_ip,
str(status[1]),
)
raise ConfigEntryNotReady
token = doorstation_config.get(CONF_TOKEN, config_entry_id)
custom_url = doorstation_config.get(CONF_CUSTOM_URL)
name = doorstation_config.get(CONF_NAME)
events = doorstation_options.get(CONF_EVENTS, [])
doorstation = ConfiguredDoorBird(device, name, events, custom_url, token)
# Subscribe to doorbell or motion events
if not await _async_register_events(hass, doorstation):
raise ConfigEntryNotReady
hass.data[DOMAIN][config_entry_id] = {
DOOR_STATION: doorstation,
DOOR_STATION_INFO: info,
}
entry.add_update_listener(_update_listener)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def _async_register_events(hass, doorstation):
try:
await hass.async_add_executor_job(doorstation.register_events, hass)
except HTTPError:
hass.components.persistent_notification.create(
"Doorbird configuration failed. Please verify that API "
"Operator permission is enabled for the Doorbird user. "
"A restart will be required once permissions have been "
"verified.",
title="Doorbird Configuration Failure",
notification_id="doorbird_schedule_error",
)
return False
return True
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
config_entry_id = entry.entry_id
doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
doorstation.events = entry.options[CONF_EVENTS]
# Subscribe to doorbell or motion events
await _async_register_events(hass, doorstation)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
modified = False
for importable_option in [CONF_EVENTS]:
if importable_option not in entry.options and importable_option in entry.data:
options[importable_option] = entry.data[importable_option]
modified = True
if modified:
hass.config_entries.async_update_entry(entry, options=options)
class ConfiguredDoorBird:
"""Attach additional information to pass along with configured device."""
def __init__(self, device, name, events, custom_url, token):
"""Initialize configured device."""
self._name = name
self._device = device
self._custom_url = custom_url
self.events = events
self.doorstation_events = [self._get_event_name(event) for event in self.events]
self._token = token
@property
def name(self):
"""Get custom device name."""
return self._name
@property
def device(self):
"""Get the configured device."""
return self._device
@property
def custom_url(self):
"""Get custom url for device."""
return self._custom_url
@property
def token(self):
"""Get token for device."""
return self._token
def register_events(self, hass):
"""Register events on device."""
# Get the URL of this server
hass_url = get_url(hass)
# Override url if another is specified in the configuration
if self.custom_url is not None:
hass_url = self.custom_url
for event in self.doorstation_events:
self._register_event(hass_url, event)
_LOGGER.info("Successfully registered URL for %s on %s", event, self.name)
@property
def slug(self):
"""Get device slug."""
return slugify(self._name)
def _get_event_name(self, event):
return f"{self.slug}_{event}"
def _register_event(self, hass_url, event):
"""Add a schedule entry in the device for a sensor."""
url = f"{hass_url}{API_URL}/{event}?token={self._token}"
# Register HA URL as webhook if not already, then get the ID
if not self.webhook_is_registered(url):
self.device.change_favorite("http", f"Home Assistant ({event})", url)
fav_id = self.get_webhook_id(url)
if not fav_id:
_LOGGER.warning(
'Could not find favorite for URL "%s". ' 'Skipping sensor "%s"',
url,
event,
)
return
def webhook_is_registered(self, url, favs=None) -> bool:
"""Return whether the given URL is registered as a device favorite."""
favs = favs if favs else self.device.favorites()
if "http" not in favs:
return False
for fav in favs["http"].values():
if fav["value"] == url:
return True
return False
    def get_webhook_id(self, url, favs=None):
        """Return the device favorite ID for the given URL.
        Returns None if no matching favorite is registered on the device.
        """
favs = favs if favs else self.device.favorites()
if "http" not in favs:
return None
for fav_id in favs["http"]:
if favs["http"][fav_id]["value"] == url:
return fav_id
return None
def get_event_data(self):
"""Get data to pass along with HA event."""
return {
"timestamp": dt_util.utcnow().isoformat(),
"live_video_url": self._device.live_video_url,
"live_image_url": self._device.live_image_url,
"rtsp_live_video_url": self._device.rtsp_live_video_url,
"html5_viewer_url": self._device.html5_viewer_url,
}
class DoorBirdRequestView(HomeAssistantView):
"""Provide a page for the device to call."""
requires_auth = False
url = API_URL
name = API_URL[1:].replace("/", ":")
extra_urls = [API_URL + "/{event}"]
async def get(self, request, event):
"""Respond to requests from the device."""
hass = request.app["hass"]
token = request.query.get("token")
device = get_doorstation_by_token(hass, token)
if device is None:
return web.Response(
status=HTTP_UNAUTHORIZED, text="Invalid token provided."
)
        # device is guaranteed to be non-None here, so fetch its event data
        event_data = device.get_event_data()
if event == "clear":
hass.bus.async_fire(RESET_DEVICE_FAVORITES, {"token": token})
message = f"HTTP Favorites cleared for {device.slug}"
return web.Response(status=HTTP_OK, text=message)
event_data[ATTR_ENTITY_ID] = hass.data[DOMAIN][
DOOR_STATION_EVENT_ENTITY_IDS
].get(event)
hass.bus.async_fire(f"{DOMAIN}_{event}", event_data)
return web.Response(status=HTTP_OK, text="OK")
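# Worked example (illustrative, hypothetical names): for a device named "front door"
# with a configured event "doorbell", ConfiguredDoorBird registers a device favorite
# pointing back at Home Assistant such as
#   http://hass.local:8123/api/doorbird/front_door_doorbell?token=<token>
# and when the device calls that URL, DoorBirdRequestView fires the event
# "doorbird_front_door_doorbell" on the Home Assistant event bus.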
|
import time
from pymyq.const import (
DEVICE_STATE as MYQ_DEVICE_STATE,
DEVICE_STATE_ONLINE as MYQ_DEVICE_STATE_ONLINE,
DEVICE_TYPE as MYQ_DEVICE_TYPE,
DEVICE_TYPE_GATE as MYQ_DEVICE_TYPE_GATE,
KNOWN_MODELS,
MANUFACTURER,
)
import voluptuous as vol
from homeassistant.components.cover import (
DEVICE_CLASS_GARAGE,
DEVICE_CLASS_GATE,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_PASSWORD,
CONF_TYPE,
CONF_USERNAME,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPENING,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
DOMAIN,
MYQ_COORDINATOR,
MYQ_GATEWAY,
MYQ_TO_HASS,
TRANSITION_COMPLETE_DURATION,
TRANSITION_START_DURATION,
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
# This parameter is no longer used; keeping it to avoid a breaking change in
# a hotfix, but in a future main release, this should be removed:
vol.Optional(CONF_TYPE): cv.string,
},
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the platform."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: config[CONF_USERNAME],
CONF_PASSWORD: config[CONF_PASSWORD],
},
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up mysq covers."""
data = hass.data[DOMAIN][config_entry.entry_id]
myq = data[MYQ_GATEWAY]
coordinator = data[MYQ_COORDINATOR]
async_add_entities(
[MyQDevice(coordinator, device) for device in myq.covers.values()], True
)
class MyQDevice(CoordinatorEntity, CoverEntity):
"""Representation of a MyQ cover."""
def __init__(self, coordinator, device):
"""Initialize with API object, device id."""
super().__init__(coordinator)
self._device = device
self._last_action_timestamp = 0
self._scheduled_transition_update = None
@property
def device_class(self):
"""Define this cover as a garage door."""
device_type = self._device.device_json.get(MYQ_DEVICE_TYPE)
if device_type is not None and device_type == MYQ_DEVICE_TYPE_GATE:
return DEVICE_CLASS_GATE
return DEVICE_CLASS_GARAGE
@property
def name(self):
"""Return the name of the garage door if any."""
return self._device.name
@property
def available(self):
"""Return if the device is online."""
if not self.coordinator.last_update_success:
return False
        # Not all devices report online, so assume True if it's missing
return self._device.device_json[MYQ_DEVICE_STATE].get(
MYQ_DEVICE_STATE_ONLINE, True
)
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSED
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return MYQ_TO_HASS.get(self._device.state) == STATE_CLOSING
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return MYQ_TO_HASS.get(self._device.state) == STATE_OPENING
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._device.device_id
async def async_close_cover(self, **kwargs):
"""Issue close command to cover."""
self._last_action_timestamp = time.time()
await self._device.close()
self._async_schedule_update_for_transition()
async def async_open_cover(self, **kwargs):
"""Issue open command to cover."""
self._last_action_timestamp = time.time()
await self._device.open()
self._async_schedule_update_for_transition()
@callback
def _async_schedule_update_for_transition(self):
self.async_write_ha_state()
# Cancel any previous updates
if self._scheduled_transition_update:
self._scheduled_transition_update()
# Schedule an update for when we expect the transition
# to be completed so the garage door or gate does not
        # seem like it's closing or opening for a long time
self._scheduled_transition_update = async_call_later(
self.hass,
TRANSITION_COMPLETE_DURATION,
self._async_complete_schedule_update,
)
async def _async_complete_schedule_update(self, _):
"""Update status of the cover via coordinator."""
self._scheduled_transition_update = None
await self.coordinator.async_request_refresh()
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(DOMAIN, self._device.device_id)},
"name": self._device.name,
"manufacturer": MANUFACTURER,
"sw_version": self._device.firmware_version,
}
model = KNOWN_MODELS.get(self._device.device_id[2:4])
if model:
device_info["model"] = model
if self._device.parent_device_id:
device_info["via_device"] = (DOMAIN, self._device.parent_device_id)
return device_info
@callback
def _async_consume_update(self):
if time.time() - self._last_action_timestamp <= TRANSITION_START_DURATION:
# If we just started a transition we need
# to prevent a bouncy state
return
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe to updates."""
self.async_on_remove(
self.coordinator.async_add_listener(self._async_consume_update)
)
async def async_will_remove_from_hass(self):
"""Undo subscription."""
if self._scheduled_transition_update:
self._scheduled_transition_update()
|
import ctypes
import ctypes.util
import struct
class _xcb_reply_t(ctypes.Structure):
# this can be used instead of xcb_intern_atom_reply_t,
# xcb_get_selection_owner_reply_t, etc
_fields_ = [('response_type', ctypes.c_uint8),
('pad0', ctypes.c_uint8),
('sequence', ctypes.c_uint16),
('length', ctypes.c_uint32),
('payload', ctypes.c_uint32)]
class _xcb_cookie_t(ctypes.Structure):
# this can be used instead of xcb_intern_atom_cookie_t,
# xcb_get_selection_owner_cookie_t, etc
_fields_ = [('sequence', ctypes.c_uint)]
_xcb_error_messages = [
None,
'XCB error: socket, pipe and other stream error',
'XCB connection closed: extension unsupported',
'XCB connection closed: insufficient memory',
'XCB connection closed: request length exceeded',
'XCB connection closed: DISPLAY parse error',
'XCB connection closed: invalid screen'
]
class XSettingsError(RuntimeError):
pass
class XSettingsParseError(XSettingsError):
pass
def get_raw_xsettings(display=0):
# initialize the libraries
xcb_library_name = ctypes.util.find_library('xcb')
if xcb_library_name is None:
raise XSettingsError('Xcb library not found')
xcb = ctypes.CDLL(xcb_library_name)
c_library_name = ctypes.util.find_library('c')
if c_library_name is None:
raise XSettingsError('C library not found')
c = ctypes.CDLL(c_library_name)
# set some args and return types
c.free.argtypes = [ctypes.c_void_p]
c.free.restype = None
xcb.xcb_connect.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_int)]
xcb.xcb_connect.restype = ctypes.c_void_p
xcb.xcb_connection_has_error.argtypes = [ctypes.c_void_p]
xcb.xcb_connection_has_error.restype = ctypes.c_int
xcb.xcb_disconnect.argtypes = [ctypes.c_void_p]
xcb.xcb_disconnect.restype = None
xcb.xcb_intern_atom.argtypes = [ctypes.c_void_p, ctypes.c_uint8, ctypes.c_uint16, ctypes.c_char_p]
xcb.xcb_intern_atom.restype = _xcb_cookie_t
xcb.xcb_intern_atom_reply.argtypes = [ctypes.c_void_p, _xcb_cookie_t, ctypes.c_void_p]
xcb.xcb_intern_atom_reply.restype = ctypes.POINTER(_xcb_reply_t)
xcb.xcb_get_selection_owner.argtypes = [ctypes.c_void_p, ctypes.c_uint32]
xcb.xcb_get_selection_owner.restype = _xcb_cookie_t
xcb.xcb_get_selection_owner_reply.argtypes = [ctypes.c_void_p, _xcb_cookie_t, ctypes.c_void_p]
xcb.xcb_get_selection_owner_reply.restype = ctypes.POINTER(_xcb_reply_t)
xcb.xcb_get_property.argtypes = [ctypes.c_void_p, ctypes.c_uint8, ctypes.c_uint32, ctypes.c_uint32,
ctypes.c_uint32, ctypes.c_uint32]
xcb.xcb_get_property.restype = _xcb_cookie_t
xcb.xcb_get_property_reply.argtypes = [ctypes.c_void_p, _xcb_cookie_t, ctypes.c_void_p]
xcb.xcb_get_property_reply.restype = ctypes.c_void_p
xcb.xcb_get_property_value_length.argtypes = [ctypes.c_void_p]
xcb.xcb_get_property_value_length.restype = ctypes.c_int
xcb.xcb_get_property_value.argtypes = [ctypes.c_void_p]
xcb.xcb_get_property_value.restype = ctypes.c_void_p
# open the connection
connection = xcb.xcb_connect(None, None)
error = xcb.xcb_connection_has_error(connection)
if error:
raise XSettingsError(_xcb_error_messages[error])
# get selection atom cookie
buffer = ('_XSETTINGS_S%d' % display).encode()
cookie = xcb.xcb_intern_atom(connection, 0, len(buffer), buffer)
# get selection atom reply
reply = xcb.xcb_intern_atom_reply(connection, cookie, None)
selection_atom = reply.contents.payload
c.free(reply)
# get selection owner cookie
cookie = xcb.xcb_get_selection_owner(connection, selection_atom)
# get selection owner reply
reply = xcb.xcb_get_selection_owner_reply(connection, cookie, None)
window = reply.contents.payload
c.free(reply)
# get settings atom cookie
buffer = b'_XSETTINGS_SETTINGS'
cookie = xcb.xcb_intern_atom(connection, 0, len(buffer), buffer)
# get settings atom reply
reply = xcb.xcb_intern_atom_reply(connection, cookie, None)
settings_atom = reply.contents.payload
c.free(reply)
# get property cookie
cookie = xcb.xcb_get_property(connection, 0, window, settings_atom, 0, 0, 0x2000)
# get property reply
reply = xcb.xcb_get_property_reply(connection, cookie, None)
if reply is not None:
length = xcb.xcb_get_property_value_length(reply)
pointer = xcb.xcb_get_property_value(reply) if length else None
result = ctypes.string_at(pointer, length)
c.free(reply)
# close the connection
xcb.xcb_disconnect(connection)
# handle possible errors
if reply is None or not length:
raise XSettingsError('XSettings not available')
return result
def parse_xsettings(raw_xsettings):
if len(raw_xsettings) < 12:
raise XSettingsParseError('length < 12')
if raw_xsettings[0] not in (0, 1):
raise XSettingsParseError('wrong order byte: %d' % raw_xsettings[0])
byte_order = '<>'[raw_xsettings[0]]
settings_count = struct.unpack(byte_order + 'I', raw_xsettings[8:12])[0]
TypeInteger, TypeString, TypeColor = range(3)
result = {}
raw_xsettings = raw_xsettings[12:]
offset = 0
for i in range(settings_count):
setting_type = raw_xsettings[offset]
offset += 2
name_length = struct.unpack(byte_order + 'H', raw_xsettings[offset:offset + 2])[0]
offset += 2
name = raw_xsettings[offset:offset + name_length]
offset += name_length
if offset & 3:
offset += 4 - (offset & 3)
offset += 4 # skip last-change-serial
if setting_type == TypeInteger:
value = struct.unpack(byte_order + 'I', raw_xsettings[offset:offset + 4])[0]
offset += 4
elif setting_type == TypeString:
value_length = struct.unpack(byte_order + 'I', raw_xsettings[offset:offset + 4])[0]
offset += 4
value = raw_xsettings[offset:offset + value_length]
offset += value_length
if offset & 3:
offset += 4 - (offset & 3)
elif setting_type == TypeColor:
value = struct.unpack(byte_order + 'HHHH', raw_xsettings[offset:offset + 8])
offset += 8
else:
raise XSettingsParseError('Wrong setting type: %d' % setting_type)
result[name] = value
return result
def get_xsettings(display=0):
raw_xsettings = get_raw_xsettings(display)
return parse_xsettings(raw_xsettings)
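# Usage sketch (an assumed typical use, not part of the original module): dump all
# XSettings of display 0.  Keys are byte strings; values are ints, byte strings or
# (r, g, b, a) tuples depending on the setting type.
if __name__ == '__main__':
    for key, value in sorted(get_xsettings().items()):
        print('%s = %r' % (key.decode(), value))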
|
import os
import subprocess
import unittest
class TestGcloud(unittest.TestCase):
def test_version(self):
result = subprocess.run([
'gcloud',
'--version',
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
print("gloud version: ", result)
self.assertEqual(0, result.returncode)
self.assertIn(b'Google Cloud SDK', result.stdout)
|
import pycountry
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_ID
from .const import (
CONF_COUNTRY_CODE,
CONF_HOUSE_NUMBER,
CONF_HOUSE_NUMBER_EXTENSION,
CONF_ZIP_CODE,
DEFAULT_COUNTRY_CODE,
)
from .const import DOMAIN # pylint:disable=unused-import
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZIP_CODE): str,
vol.Required(CONF_HOUSE_NUMBER): int,
vol.Optional(CONF_HOUSE_NUMBER_EXTENSION): str,
vol.Optional(CONF_COUNTRY_CODE, default=DEFAULT_COUNTRY_CODE): str,
}
)
class AvriConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Avri config flow."""
VERSION = 1
async def _show_setup_form(self, errors=None):
"""Show the setup form to the user."""
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors=errors or {},
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
if user_input is None:
return await self._show_setup_form()
zip_code = user_input[CONF_ZIP_CODE].replace(" ", "").upper()
errors = {}
if user_input[CONF_HOUSE_NUMBER] <= 0:
errors[CONF_HOUSE_NUMBER] = "invalid_house_number"
return await self._show_setup_form(errors)
if not pycountry.countries.get(alpha_2=user_input[CONF_COUNTRY_CODE]):
errors[CONF_COUNTRY_CODE] = "invalid_country_code"
return await self._show_setup_form(errors)
unique_id = (
f"{zip_code}"
f" "
f"{user_input[CONF_HOUSE_NUMBER]}"
f'{user_input.get(CONF_HOUSE_NUMBER_EXTENSION, "")}'
)
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=unique_id,
data={
CONF_ID: unique_id,
CONF_ZIP_CODE: zip_code,
CONF_HOUSE_NUMBER: user_input[CONF_HOUSE_NUMBER],
CONF_HOUSE_NUMBER_EXTENSION: user_input.get(
CONF_HOUSE_NUMBER_EXTENSION, ""
),
CONF_COUNTRY_CODE: user_input[CONF_COUNTRY_CODE],
},
)
|
from __future__ import print_function
import argparse
import fileinput
import os
import sys
_stash = globals()["_stash"]
def main(args):
"""
The main function.
"""
ap = argparse.ArgumentParser()
ap.add_argument('file', nargs='*', help='one or more files to be copied')
ns = ap.parse_args(args)
if not hasattr(_stash, "libdist"):
print(_stash.text_color("Error: libdist not loaded.", "red"))
sys.exit(1)
fileinput.close() # in case it is not closed
try:
_stash.libdist.clipboard_set(u''.join(line for line in fileinput.input(ns.file, openhook=fileinput.hook_encoded("utf-8"))))
except Exception as err:
print(_stash.text_color("pbcopy: {}: {!s}".format(type(err).__name__, err), "red"), file=sys.stderr)
sys.exit(1)
finally:
fileinput.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
from gpiozero import LED, Button
from gpiozero.pins.pigpio import PiGPIOFactory
CONF_BOUNCETIME = "bouncetime"
CONF_INVERT_LOGIC = "invert_logic"
CONF_PULL_MODE = "pull_mode"
DEFAULT_BOUNCETIME = 50
DEFAULT_INVERT_LOGIC = False
DEFAULT_PULL_MODE = "UP"
DOMAIN = "remote_rpi_gpio"
def setup(hass, config):
"""Set up the Raspberry Pi Remote GPIO component."""
return True
def setup_output(address, port, invert_logic):
"""Set up a GPIO as output."""
try:
return LED(
port, active_high=not invert_logic, pin_factory=PiGPIOFactory(address)
)
except (ValueError, IndexError, KeyError):
return None
def setup_input(address, port, pull_mode, bouncetime):
"""Set up a GPIO as input."""
    if pull_mode == "UP":
        pull_gpio_up = True
    elif pull_mode == "DOWN":
        pull_gpio_up = False
    else:
        # Fall back to the default pull mode so pull_gpio_up is always defined
        pull_gpio_up = DEFAULT_PULL_MODE == "UP"
try:
return Button(
port,
pull_up=pull_gpio_up,
bounce_time=bouncetime,
pin_factory=PiGPIOFactory(address),
)
except (ValueError, IndexError, KeyError, OSError):
return None
def write_output(switch, value):
"""Write a value to a GPIO."""
if value == 1:
switch.on()
if value == 0:
switch.off()
def read_input(button):
"""Read a value from a GPIO."""
return button.is_pressed
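# Usage sketch (not part of Home Assistant): the helpers above can also be used on
# their own to drive a pin on a remote Raspberry Pi running pigpiod.  The host name
# and pin number below are hypothetical.
if __name__ == "__main__":
    led = setup_output("raspberrypi.local", 17, invert_logic=False)
    if led is not None:
        write_output(led, 1)  # switch the output on
        write_output(led, 0)  # and back off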
|
import logging
import signal
import sys
from types import FrameType
from homeassistant.const import RESTART_EXIT_CODE
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
@callback
@bind_hass
def async_register_signal_handling(hass: HomeAssistant) -> None:
"""Register system signal handler for core."""
if sys.platform != "win32":
@callback
def async_signal_handle(exit_code: int) -> None:
"""Wrap signal handling.
* queue call to shutdown task
* re-instate default handler
"""
hass.loop.remove_signal_handler(signal.SIGTERM)
hass.loop.remove_signal_handler(signal.SIGINT)
hass.async_create_task(hass.async_stop(exit_code))
try:
hass.loop.add_signal_handler(signal.SIGTERM, async_signal_handle, 0)
except ValueError:
_LOGGER.warning("Could not bind to SIGTERM")
try:
hass.loop.add_signal_handler(signal.SIGINT, async_signal_handle, 0)
except ValueError:
_LOGGER.warning("Could not bind to SIGINT")
try:
hass.loop.add_signal_handler(
signal.SIGHUP, async_signal_handle, RESTART_EXIT_CODE
)
except ValueError:
_LOGGER.warning("Could not bind to SIGHUP")
else:
old_sigterm = None
old_sigint = None
@callback
def async_signal_handle(exit_code: int, frame: FrameType) -> None:
"""Wrap signal handling.
* queue call to shutdown task
* re-instate default handler
"""
signal.signal(signal.SIGTERM, old_sigterm)
signal.signal(signal.SIGINT, old_sigint)
hass.async_create_task(hass.async_stop(exit_code))
try:
old_sigterm = signal.signal(signal.SIGTERM, async_signal_handle)
except ValueError:
_LOGGER.warning("Could not bind to SIGTERM")
try:
old_sigint = signal.signal(signal.SIGINT, async_signal_handle)
except ValueError:
_LOGGER.warning("Could not bind to SIGINT")
|
import datetime as dt
from typing import Callable, Optional, Union
import attr
PublishPayloadType = Union[str, bytes, int, float, None]
@attr.s(slots=True, frozen=True)
class Message:
"""MQTT Message."""
topic: str = attr.ib()
payload: PublishPayloadType = attr.ib()
qos: int = attr.ib()
retain: bool = attr.ib()
subscribed_topic: Optional[str] = attr.ib(default=None)
timestamp: Optional[dt.datetime] = attr.ib(default=None)
MessageCallbackType = Callable[[Message], None]
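# Example (a sketch, not part of Home Assistant): constructing a Message and a
# callback that satisfies MessageCallbackType.
if __name__ == "__main__":
    def print_message(msg: Message) -> None:
        """Print the topic and payload of a received MQTT message."""
        print(f"{msg.topic}: {msg.payload!r}")
    print_message(Message(topic="home/livingroom/temp", payload="21.5", qos=0, retain=False))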
|
import os
import sys
import tempfile
import shutil
from flexx.util.testing import run_tests_if_main, raises, skip
from flexx.app._asset import solve_dependencies, get_mod_name, module_is_package
from flexx.util.logging import capture_log
from flexx import app
N_STANDARD_ASSETS = 3
test_filename = os.path.join(tempfile.gettempdir(), 'flexx_asset_cache.test')
class WTF:
pass
def test_get_mod_name():
assert get_mod_name(app.PyComponent) == 'flexx.app._component2'
assert get_mod_name(app._component2) == 'flexx.app._component2'
assert get_mod_name(app) == 'flexx.app.__init__'
def test_asset():
# Initialization
asset1 = app.Asset('foo.js', 'foo=3')
assert 'foo.js' in repr(asset1)
assert 'foo.js' == asset1.name
assert asset1.source == 'foo=3'
asset2 = app.Asset('bar.css', 'bar=2')
assert 'bar.css' in repr(asset2)
assert 'bar.css' == asset2.name
assert asset2.source == 'bar=2'
with raises(TypeError):
app.Asset() # :/
with raises(TypeError):
app.Asset('foo.js') # need source
with raises(TypeError):
app.Asset(3, 'bar=2') # name not a str
with raises(ValueError):
app.Asset('foo.png', '') # js and css only
with raises(TypeError):
app.Asset('bar.css', 3) # source not str
with raises(TypeError):
app.Asset('bar.css', ['a']) # source not str
# To html JS
asset = app.Asset('foo.js', 'foo=3;bar=3')
code = asset.to_html('', 0)
assert code.startswith('<script') and code.strip().endswith('</script>')
assert 'foo=3' in code
assert '\n' not in code # because source had no newlines
asset = app.Asset('foo.js', 'foo=3\nbar=3')
code = asset.to_html('', 0)
assert code.startswith('<script') and code.strip().endswith('</script>')
assert '\nfoo=3\nbar=3\n' in code # note the newlines
asset = app.Asset('foo.js', 'foo=3\nbar=3')
code = asset.to_html()
assert code.startswith('<script ') and code.strip().endswith('</script>')
assert 'foo=' not in code
    assert '\n' not in code # because it's a link
# To html CSS
asset = app.Asset('bar.css', 'foo=3;bar=3')
code = asset.to_html('', 0)
assert code.startswith('<style') and code.strip().endswith('</style>')
assert 'foo=' in code
assert '\n' not in code # because source had no newlines
asset = app.Asset('bar.css', 'foo=3\nbar=3')
code = asset.to_html('', 0)
assert code.startswith('<style') and code.strip().endswith('</style>')
assert '\nfoo=3\nbar=3\n' in code # note the newlines
asset = app.Asset('bar.css', 'foo=3\nbar=3')
code = asset.to_html()
assert code.startswith('<link') and code.strip().endswith('/>')
assert 'foo-' not in code
    assert '\n' not in code # because it's a link
# Test asset via uri
fname = 'file:///home/xx/foobar.css'
with raises(TypeError):
app.Asset('bar.css', fname)
with raises(TypeError):
app.Asset(fname)
def test_remote_asset():
# Prepare example asset info
# Note: use http instead of https to avoid spurious certificate errors
bootstrap_url = 'http://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css'
jquery_url = 'http://code.jquery.com/jquery-3.1.1.slim.min.js'
with open(test_filename + '.js', 'wb') as f:
f.write('var blablabla=7;'.encode())
# JS from url
asset = app.Asset(jquery_url)
assert asset.remote
assert asset.source == jquery_url
assert 'jQuery v3.1.1' in asset.to_string()
assert 'jQuery v3.1.1' in asset.to_html('{}', 0)
assert 'jQuery v3.1.1' not in asset.to_html('{}', 1)
assert 'jQuery v3.1.1' not in asset.to_html('{}', 2)
assert 'jQuery v3.1.1' not in asset.to_html('{}', 3)
assert 'src=' not in asset.to_html('{}', 0)
assert 'src=' in asset.to_html('{}', 1)
assert 'src=' in asset.to_html('{}', 2)
assert 'src=' in asset.to_html('{}', 3)
assert 'http://' in asset.to_html('{}', 1)
assert 'http://' not in asset.to_html('{}', 2)
assert 'http://' in asset.to_html('{}', 3)
# CSS from url
asset = app.Asset(bootstrap_url)
assert asset.remote
assert asset.source == bootstrap_url
assert 'Bootstrap v3.3.7' in asset.to_string()
assert 'Bootstrap v3.3.7' in asset.to_html('{}', 0)
assert 'Bootstrap v3.3.7' not in asset.to_html('{}', 1)
assert 'Bootstrap v3.3.7' not in asset.to_html('{}', 2)
assert 'Bootstrap v3.3.7' not in asset.to_html('{}', 3)
assert 'href=' not in asset.to_html('{}', 0)
assert 'href=' in asset.to_html('{}', 1)
assert 'href=' in asset.to_html('{}', 2)
assert 'href=' in asset.to_html('{}', 3)
assert 'http://' in asset.to_html('{}', 1)
assert 'http://' not in asset.to_html('{}', 2)
assert 'http://' in asset.to_html('{}', 3)
    # Fails
with raises(TypeError): # JS from file - not allowed
app.Asset('file://' + test_filename + '.js')
with raises(TypeError):
app.Asset(jquery_url, 'foo=3') # no sources for remote asset
with raises(TypeError):
app.Asset(jquery_url, ['foo=3']) # no sources for remote asset
def test_lazy_asset():
side_effect = []
def lazy():
side_effect.append(True)
return 'spaaam'
asset = app.Asset('foo.js', lazy)
assert asset.source is lazy
assert not side_effect
assert asset.to_string() == 'spaaam'
assert side_effect
while side_effect:
side_effect.pop(0)
assert asset.to_string() == 'spaaam'
assert not side_effect
# Fail
def lazy_wrong():
return None
asset = app.Asset('foo.js', lazy_wrong)
assert asset.source is lazy_wrong
with raises(ValueError):
asset.to_string()
def test_bundle():
try:
from flexx import ui
except ImportError:
skip('no flexx.ui')
store = {}
m1 = app.JSModule('flexx.ui.widgets._button', store)
m1.add_variable('Button')
m2 = app.JSModule('flexx.ui.widgets._tree', store)
m2.add_variable('TreeWidget')
m3 = store['flexx.ui._widget'] # because its a dep of the above
# JS bundle
bundle = app.Bundle('flexx.ui.js')
assert 'flexx.ui' in repr(bundle)
bundle.add_module(m1)
bundle.add_module(m2)
bundle.add_module(m3)
# Modules are sorted
assert bundle.modules == (m3, m1, m2)
    # Deps are aggregated
assert 'flexx.app.js' in bundle.deps
assert 'flexx.app._component2.js' in bundle.deps
assert not any('flexx.ui' in dep for dep in bundle.deps)
# Strings are combined
code = bundle.to_string()
assert '$Widget =' in code
assert '$Button =' in code
assert '$TreeWidget =' in code
# CSS bundle
bundle = app.Bundle('flexx.ui.css')
bundle.add_module(m1)
bundle.add_module(m2)
bundle.add_module(m3)
#
code = bundle.to_string()
assert '.Widget =' not in code
assert '.flx-Widget {' in code
assert '.flx-TreeWidget {' in code
# This works too
bundle = app.Bundle('-foo.js')
bundle.add_module(m1)
# But this does not
bundle = app.Bundle('foo.js')
with raises(ValueError):
bundle.add_module(m1)
# Assets can be bundled too
bundle = app.Bundle('flexx.ui.css')
bundle.add_module(m1)
bundle.add_module(m2)
a1 = app.Asset('foo.css', 'foo-xxx')
a2 = app.Asset('bar.css', 'bar-yyy')
bundle.add_asset(a1)
bundle.add_asset(a2)
assert a1 in bundle.assets
assert a2 in bundle.assets
code = bundle.to_string()
assert 'foo-xxx' in code
assert 'bar-yyy' in code
with raises(TypeError):
bundle.add_asset()
with raises(TypeError):
bundle.add_asset(3)
with raises(TypeError):
bundle.add_asset(bundle) # no bundles
## Sorting
class Thing:
""" An object that can be sorted with solve_dependencies().
"""
def __init__(self, name, deps):
self.name = name
self.deps = deps
def test_dependency_resolution_1():
""" No deps, maintain order. """
a1 = Thing('a1.js', [])
a2 = Thing('a2.js', [])
a3 = Thing('a3.js', [])
aa = a1, a2, a3
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['a1.js', 'a2.js', 'a3.js']
def test_dependency_resolution_2():
""" One chain of deps """
a1 = Thing('a1.js', ['b1.js'])
b1 = Thing('b1.js', ['c1.js'])
c1 = Thing('c1.js', ['d1.js'])
d1 = Thing('d1.js', ['e1.js'])
e1 = Thing('e1.js', [])
# e1 = Thing('e1.js', '', ['f1.js'])
# f1 = Thing('f1.js', '', ['g1.js'])
# g1 = Thing('g1.js', '', [])
aa = a1, b1, c1, d1, e1
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['e1.js', 'd1.js', 'c1.js', 'b1.js', 'a1.js']
aa = a1, d1, e1, b1, c1
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['e1.js', 'd1.js', 'c1.js', 'b1.js', 'a1.js']
def test_dependency_resolution_3():
""" Unkown deps are ignored (but warned for) """
a1 = Thing('a1.js', ['b1.js'])
b1 = Thing('b1.js', ['bar.js', 'c1.js'])
c1 = Thing('c1.js', ['d1.js', 'foo.js'])
d1 = Thing('d1.js', ['e1.js'])
e1 = Thing('e1.js', [])
aa = a1, b1, c1, d1, e1
with capture_log('warning') as logs:
aa = solve_dependencies(aa, warn_missing=True)
assert logs and 'missing dependency' in logs[0]
assert [a.name for a in aa] == ['e1.js', 'd1.js', 'c1.js', 'b1.js', 'a1.js']
def test_dependency_resolution_4():
""" Circular deps """
a1 = Thing('a1.js', ['b1.js'])
b1 = Thing('b1.js', ['c1.js'])
c1 = Thing('c1.js', ['d1.js'])
d1 = Thing('d1.js', ['e1.js', 'a1.js'])
e1 = Thing('e1.js', [])
aa = a1, b1, c1, d1, e1
with raises(RuntimeError):
aa = solve_dependencies(aa)
def test_dependency_resolution_5():
""" Two chains """
a1 = Thing('a1.js', ['b1.js'])
b1 = Thing('b1.js', ['c1.js'])
c1 = Thing('c1.js', ['d1.js'])
a2 = Thing('a2.js', ['b2.js'])
b2 = Thing('b2.js', ['c2.js'])
c2 = Thing('c2.js', ['d2.js'])
# First the chain 1
aa = a1, b1, c1, a2, b2, c2
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['c1.js', 'b1.js', 'a1.js', 'c2.js', 'b2.js', 'a2.js']
# First the chain 2
aa = a2, b2, c2, a1, b1, c1
aa = solve_dependencies(aa)
assert [a.name for a in aa] == [ 'c2.js', 'b2.js', 'a2.js', 'c1.js', 'b1.js', 'a1.js']
# Mix, put el from chain 1 first
aa = a1, a2, b1, b2, c1, c2
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['c1.js', 'b1.js', 'a1.js', 'c2.js', 'b2.js', 'a2.js']
# Mix, put el from chain 2 first
aa = a2, a1, b1, b2, c1, c2
aa = solve_dependencies(aa)
assert [a.name for a in aa] == [ 'c2.js', 'b2.js', 'a2.js', 'c1.js', 'b1.js', 'a1.js']
def test_dependency_resolution_6():
""" Multiple deps - order """
a1 = Thing('a1.js', ['b1.js', 'b2.js'])
b1 = Thing('b1.js', ['c1.js', 'c2.js'])
b2 = Thing('b2.js', ['c2.js', 'c3.js'])
c1 = Thing('c1.js', [])
c2 = Thing('c2.js', [])
c3 = Thing('c3.js', [])
aa = a1, b1, b2, c1, c2, c3
aa = solve_dependencies(aa)
assert [a.name for a in aa] == [ 'c1.js', 'c2.js', 'b1.js', 'c3.js', 'b2.js', 'a1.js']
def test_dependency_resolution_7():
""" Shared deps """
a1 = Thing('a1.js', ['b1.js', 'b2.js'])
b1 = Thing('b1.js', ['c1.js'])
b2 = Thing('b2.js', ['d1.js'])
c1 = Thing('c1.js', ['d1.js'])
d1 = Thing('d1.js', [])
aa = a1, b1, b2, c1, d1
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['d1.js', 'c1.js', 'b1.js', 'b2.js', 'a1.js']
def test_dependency_resolution_8():
""" Position of singleton thing """
a0 = Thing('a0.js', [])
a1 = Thing('a1.js', ['b1.js', 'b2.js'])
b1 = Thing('b1.js', ['c1.js'])
b2 = Thing('b2.js', ['d1.js'])
c1 = Thing('c1.js', ['d1.js'])
d1 = Thing('d1.js', [])
# Stay in front
# \/
aa = a0, a1, b1, b2, c1, d1
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['a0.js', 'd1.js', 'c1.js', 'b1.js', 'b2.js', 'a1.js']
    # Gets sent to back - after a1, and a1 gets sent to back due to its deps
# \/
aa = a1, a0, b1, b2, c1, d1
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['d1.js', 'c1.js', 'b1.js', 'b2.js', 'a1.js', 'a0.js']
# Stay behind b1
# \/
aa = b1, a0, a1, b2, c1, d1
aa = solve_dependencies(aa)
assert [a.name for a in aa] == ['d1.js', 'c1.js', 'b1.js', 'a0.js', 'b2.js', 'a1.js']
run_tests_if_main()
|
import json
import logging
from nacl.encoding import Base64Encoder
from nacl.secret import SecretBox
from homeassistant.components import zone as zone_comp
from homeassistant.components.device_tracker import (
SOURCE_TYPE_BLUETOOTH_LE,
SOURCE_TYPE_GPS,
)
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE, STATE_HOME
from homeassistant.util import decorator, slugify
from .helper import supports_encryption
_LOGGER = logging.getLogger(__name__)
HANDLERS = decorator.Registry()
def get_cipher():
"""Return decryption function and length of key.
Async friendly.
"""
def decrypt(ciphertext, key):
"""Decrypt ciphertext using key."""
return SecretBox(key).decrypt(ciphertext, encoder=Base64Encoder)
return (SecretBox.KEY_SIZE, decrypt)
def _parse_topic(topic, subscribe_topic):
"""Parse an MQTT topic {sub_topic}/user/dev, return (user, dev) tuple.
Async friendly.
"""
subscription = subscribe_topic.split("/")
try:
user_index = subscription.index("#")
except ValueError:
_LOGGER.error("Can't parse subscription topic: '%s'", subscribe_topic)
raise
topic_list = topic.split("/")
try:
user, device = topic_list[user_index], topic_list[user_index + 1]
except IndexError:
_LOGGER.error("Can't parse topic: '%s'", topic)
raise
return user, device
def _parse_see_args(message, subscribe_topic):
"""Parse the OwnTracks location parameters, into the format see expects.
Async friendly.
"""
user, device = _parse_topic(message["topic"], subscribe_topic)
dev_id = slugify(f"{user}_{device}")
kwargs = {"dev_id": dev_id, "host_name": user, "attributes": {}}
if message["lat"] is not None and message["lon"] is not None:
kwargs["gps"] = (message["lat"], message["lon"])
else:
kwargs["gps"] = None
if "acc" in message:
kwargs["gps_accuracy"] = message["acc"]
if "batt" in message:
kwargs["battery"] = message["batt"]
if "vel" in message:
kwargs["attributes"]["velocity"] = message["vel"]
if "tid" in message:
kwargs["attributes"]["tid"] = message["tid"]
if "addr" in message:
kwargs["attributes"]["address"] = message["addr"]
if "cog" in message:
kwargs["attributes"]["course"] = message["cog"]
if "bs" in message:
kwargs["attributes"]["battery_status"] = message["bs"]
if "t" in message:
if message["t"] in ("c", "u"):
kwargs["source_type"] = SOURCE_TYPE_GPS
if message["t"] == "b":
kwargs["source_type"] = SOURCE_TYPE_BLUETOOTH_LE
return dev_id, kwargs
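# Worked example (illustrative, hypothetical user and device names): with
# subscribe_topic "owntracks/#" and a message published on "owntracks/paulus/phone",
# _parse_topic() returns ("paulus", "phone") and _parse_see_args() yields
# dev_id "paulus_phone" plus kwargs with gps=(lat, lon) and any optional
# attributes (battery, velocity, tid, address, ...).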
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters.
Async friendly.
"""
if zone is not None:
kwargs["gps"] = (
zone.attributes[ATTR_LATITUDE],
zone.attributes[ATTR_LONGITUDE],
)
kwargs["gps_accuracy"] = zone.attributes["radius"]
kwargs["location_name"] = location
return kwargs
def _decrypt_payload(secret, topic, ciphertext):
"""Decrypt encrypted payload."""
try:
if supports_encryption():
keylen, decrypt = get_cipher()
else:
_LOGGER.warning("Ignoring encrypted payload because nacl not installed")
return None
except OSError:
_LOGGER.warning("Ignoring encrypted payload because nacl not installed")
return None
if isinstance(secret, dict):
key = secret.get(topic)
else:
key = secret
if key is None:
_LOGGER.warning(
"Ignoring encrypted payload because no decryption key known for topic %s",
topic,
)
return None
key = key.encode("utf-8")
key = key[:keylen]
key = key.ljust(keylen, b"\0")
try:
message = decrypt(ciphertext, key)
message = message.decode("utf-8")
_LOGGER.debug("Decrypted payload: %s", message)
return message
except ValueError:
_LOGGER.warning(
"Ignoring encrypted payload because unable to decrypt using key for topic %s",
topic,
)
return None
def encrypt_message(secret, topic, message):
"""Encrypt message."""
keylen = SecretBox.KEY_SIZE
if isinstance(secret, dict):
key = secret.get(topic)
else:
key = secret
if key is None:
_LOGGER.warning(
"Unable to encrypt payload because no decryption key known " "for topic %s",
topic,
)
return None
key = key.encode("utf-8")
key = key[:keylen]
key = key.ljust(keylen, b"\0")
try:
message = message.encode("utf-8")
payload = SecretBox(key).encrypt(message, encoder=Base64Encoder)
_LOGGER.debug("Encrypted message: %s to %s", message, payload)
return payload.decode("utf-8")
except ValueError:
_LOGGER.warning("Unable to encrypt message for topic %s", topic)
return None
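# Round-trip sketch (illustrative): for the same secret and topic,
# encrypt_message() and _decrypt_payload() are inverses, assuming pynacl is
# installed so supports_encryption() returns True, e.g.
#   ciphertext = encrypt_message("s3cret", "owntracks/user/dev", '{"_type": "location"}')
#   _decrypt_payload("s3cret", "owntracks/user/dev", ciphertext)  # -> '{"_type": "location"}'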
@HANDLERS.register("location")
async def async_handle_location_message(hass, context, message):
"""Handle a location message."""
if not context.async_valid_accuracy(message):
return
if context.events_only:
_LOGGER.debug("Location update ignored due to events_only setting")
return
dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
if context.regions_entered[dev_id]:
        _LOGGER.debug(
            "Location update ignored, inside region %s",
            context.regions_entered[dev_id][-1],
        )
return
context.async_see(**kwargs)
context.async_see_beacons(hass, dev_id, kwargs)
async def _async_transition_message_enter(hass, context, message, location):
"""Execute enter event."""
zone = hass.states.get(f"zone.{slugify(location)}")
dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
if zone is None and message.get("t") == "b":
# Not a HA zone, and a beacon so mobile beacon.
# kwargs will contain the lat/lon of the beacon
# which is not where the beacon actually is
# and is probably set to 0/0
beacons = context.mobile_beacons_active[dev_id]
if location not in beacons:
beacons.add(location)
_LOGGER.info("Added beacon %s", location)
context.async_see_beacons(hass, dev_id, kwargs)
else:
# Normal region
regions = context.regions_entered[dev_id]
if location not in regions:
regions.append(location)
_LOGGER.info("Enter region %s", location)
_set_gps_from_zone(kwargs, location, zone)
context.async_see(**kwargs)
context.async_see_beacons(hass, dev_id, kwargs)
async def _async_transition_message_leave(hass, context, message, location):
"""Execute leave event."""
dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
regions = context.regions_entered[dev_id]
if location in regions:
regions.remove(location)
beacons = context.mobile_beacons_active[dev_id]
if location in beacons:
beacons.remove(location)
_LOGGER.info("Remove beacon %s", location)
context.async_see_beacons(hass, dev_id, kwargs)
else:
new_region = regions[-1] if regions else None
if new_region:
# Exit to previous region
zone = hass.states.get(f"zone.{slugify(new_region)}")
_set_gps_from_zone(kwargs, new_region, zone)
_LOGGER.info("Exit to %s", new_region)
context.async_see(**kwargs)
context.async_see_beacons(hass, dev_id, kwargs)
return
_LOGGER.info("Exit to GPS")
# Check for GPS accuracy
if context.async_valid_accuracy(message):
context.async_see(**kwargs)
context.async_see_beacons(hass, dev_id, kwargs)
@HANDLERS.register("transition")
async def async_handle_transition_message(hass, context, message):
"""Handle a transition message."""
if message.get("desc") is None:
_LOGGER.error(
"Location missing from `Entering/Leaving` message - "
"please turn `Share` on in OwnTracks app"
)
return
# OwnTracks uses - at the start of a beacon zone
# to switch on 'hold mode' - ignore this
location = message["desc"].lstrip("-")
# Create a layer of indirection for Owntracks instances that may name
# regions differently than their HA names
if location in context.region_mapping:
location = context.region_mapping[location]
if location.lower() == "home":
location = STATE_HOME
if message["event"] == "enter":
await _async_transition_message_enter(hass, context, message, location)
elif message["event"] == "leave":
await _async_transition_message_leave(hass, context, message, location)
else:
_LOGGER.error(
"Misformatted mqtt msgs, _type=transition, event=%s", message["event"]
)
async def async_handle_waypoint(hass, name_base, waypoint):
"""Handle a waypoint."""
name = waypoint["desc"]
pretty_name = f"{name_base} - {name}"
lat = waypoint["lat"]
lon = waypoint["lon"]
rad = waypoint["rad"]
# check zone exists
entity_id = zone_comp.ENTITY_ID_FORMAT.format(slugify(pretty_name))
# Check if state already exists
if hass.states.get(entity_id) is not None:
return
zone = zone_comp.Zone(
{
zone_comp.CONF_NAME: pretty_name,
zone_comp.CONF_LATITUDE: lat,
zone_comp.CONF_LONGITUDE: lon,
zone_comp.CONF_RADIUS: rad,
zone_comp.CONF_ICON: zone_comp.ICON_IMPORT,
zone_comp.CONF_PASSIVE: False,
},
False,
)
zone.hass = hass
zone.entity_id = entity_id
zone.async_write_ha_state()
@HANDLERS.register("waypoint")
@HANDLERS.register("waypoints")
async def async_handle_waypoints_message(hass, context, message):
"""Handle a waypoints message."""
if not context.import_waypoints:
return
if context.waypoint_whitelist is not None:
user = _parse_topic(message["topic"], context.mqtt_topic)[0]
if user not in context.waypoint_whitelist:
return
if "waypoints" in message:
wayps = message["waypoints"]
else:
wayps = [message]
_LOGGER.info("Got %d waypoints from %s", len(wayps), message["topic"])
name_base = " ".join(_parse_topic(message["topic"], context.mqtt_topic))
for wayp in wayps:
await async_handle_waypoint(hass, name_base, wayp)
@HANDLERS.register("encrypted")
async def async_handle_encrypted_message(hass, context, message):
"""Handle an encrypted message."""
if "topic" not in message and isinstance(context.secret, dict):
_LOGGER.error("You cannot set per topic secrets when using HTTP")
return
plaintext_payload = _decrypt_payload(
context.secret, message.get("topic"), message["data"]
)
if plaintext_payload is None:
return
decrypted = json.loads(plaintext_payload)
if "topic" in message and "topic" not in decrypted:
decrypted["topic"] = message["topic"]
await async_handle_message(hass, context, decrypted)
@HANDLERS.register("lwt")
@HANDLERS.register("configuration")
@HANDLERS.register("beacon")
@HANDLERS.register("cmd")
@HANDLERS.register("steps")
@HANDLERS.register("card")
async def async_handle_not_impl_msg(hass, context, message):
"""Handle valid but not implemented message types."""
_LOGGER.debug("Not handling %s message: %s", message.get("_type"), message)
async def async_handle_unsupported_msg(hass, context, message):
"""Handle an unsupported or invalid message type."""
_LOGGER.warning("Received unsupported message type: %s", message.get("_type"))
async def async_handle_message(hass, context, message):
"""Handle an OwnTracks message."""
msgtype = message.get("_type")
_LOGGER.debug("Received %s", message)
handler = HANDLERS.get(msgtype, async_handle_unsupported_msg)
await handler(hass, context, message)
|
from __future__ import print_function
import tests
from pyVim import connect
from pyVmomi import vim
class VirtualMachineTests(tests.VCRTestBase):
@tests.VCRTestBase.my_vcr.use_cassette('vm_nic_data.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='never')
def test_vm_nic_data(self):
data = {'ESXi-5.5-16': [],
'ESXi-5.5-17': [],
'ESXi-5.5-18': [],
'ESXi11': ['00:0c:29:e1:e0:f8'],
'ESXi12': ['00:50:56:b4:3c:3c'],
'ESXi20': ['00:50:56:b4:fc:9b', '00:50:56:b4:28:e7'],
'ESXi21': ['00:50:56:b4:8d:7a', '00:50:56:b4:39:b8'],
'ESXi22': ['00:0c:29:36:b5:5a', '00:0c:29:36:b5:64'],
'ESXi23': ['00:50:56:b4:91:f9', '00:50:56:b4:90:9f'],
'ESXi38-v5.0': ['00:0c:29:ce:6a:d8', '00:0c:29:ce:6a:e2'],
'MARVEL-Agents_of_Atlas': [],
'MARVEL-Alex_Power': [],
'MARVEL-Archangel': [],
'MARVEL-Freak': [],
'MARVEL-Hepzibah': [],
'MARVEL-Mach_IV': [],
'MARVEL-Serpent_Society': [],
'MARVEL-Spectrum': [],
'MARVEL-The_Hand': [],
'MARVEL-Vanisher_(Ultimate)': [],
'VCVA-5.5u1-11': ['00:0c:29:9d:a3:8c'],
'VCVA-5.5u1-14': ['00:0c:29:75:21:2e'],
'VCVA33': ['00:0c:29:e3:f9:f7'],
'VCVA36': ['00:0c:29:44:8b:76'],
'VCVA37-v5.0': ['00:50:56:b4:89:db'],
'box': ['00:50:56:82:28:7d'],
'box_copy': ['00:50:56:82:34:02'],
'esx4.1.0': ['00:0c:29:1f:ec:ba', '00:0c:29:1f:ec:c4']}
# see: http://python3porting.com/noconv.html
si = connect.SmartConnect(host='vcsa',
user='my_user',
pwd='my_password')
content = si.RetrieveContent()
virtual_machines = content.viewManager.CreateContainerView(
content.rootFolder, [vim.VirtualMachine], True)
for virtual_machine in virtual_machines.view:
name = virtual_machine.name
self.assertTrue(name in data.keys())
macs = data[name]
if virtual_machine.guest:
for net in virtual_machine.guest.net:
self.assertTrue(net.macAddress in macs)
|
import time
import random
import logging
import threading
from plumbum.commands import BaseCommand, run_proc
from plumbum.commands.processes import ProcessExecutionError
from plumbum.lib import six
from plumbum.machines.base import PopenAddons
class ShellSessionError(Exception):
    """Raised when something goes wrong when calling
    :func:`ShellSession.popen <plumbum.session.ShellSession.popen>`"""
    pass
class SSHCommsError(ProcessExecutionError, EOFError):
    """Raised when the communication channel can't be created on the
    remote host or it times out."""
class SSHCommsChannel2Error(SSHCommsError):
    """Raised when channel 2 (stderr) is not available"""
class IncorrectLogin(SSHCommsError):
    """Raised when incorrect login credentials are provided"""
class HostPublicKeyUnknown(SSHCommsError):
    """Raised when the host public key isn't known"""
shell_logger = logging.getLogger("plumbum.shell")
#===================================================================================================
# Shell Session Popen
#===================================================================================================
class MarkedPipe(object):
"""A pipe-like object from which you can read lines; the pipe will return report EOF (the
empty string) when a special marker is detected"""
__slots__ = ["pipe", "marker", "__weakref__"]
def __init__(self, pipe, marker):
self.pipe = pipe
self.marker = marker
if six.PY3:
self.marker = six.bytes(self.marker, "ascii")
def close(self):
"""'Closes' the marked pipe; following calls to ``readline`` will return """ ""
# consume everything
while self.readline():
pass
self.pipe = None
def readline(self):
"""Reads the next line from the pipe; returns "" when the special marker is reached.
Raises ``EOFError`` if the underlying pipe has closed"""
if self.pipe is None:
return six.b("")
line = self.pipe.readline()
if not line:
raise EOFError()
if line.strip() == self.marker:
self.pipe = None
line = six.b("")
return line
class SessionPopen(PopenAddons):
"""A shell-session-based ``Popen``-like object (has the following attributes: ``stdin``,
``stdout``, ``stderr``, ``returncode``)"""
def __init__(self, proc, argv, isatty, stdin, stdout, stderr, encoding):
self.proc = proc
self.argv = argv
self.isatty = isatty
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.custom_encoding = encoding
self.returncode = None
self._done = False
def poll(self):
"""Returns the process' exit code or ``None`` if it's still running"""
if self._done:
return self.returncode
else:
return None
def wait(self):
"""Waits for the process to terminate and returns its exit code"""
self.communicate()
return self.returncode
def communicate(self, input=None):
"""Consumes the process' stdout and stderr until the it terminates.
:param input: An optional bytes/buffer object to send to the process over stdin
:returns: A tuple of (stdout, stderr)
"""
stdout = []
stderr = []
sources = [("1", stdout, self.stdout)]
if not self.isatty:
# in tty mode, stdout and stderr are unified
sources.append(("2", stderr, self.stderr))
i = 0
while sources:
if input:
chunk = input[:1000]
self.stdin.write(chunk)
self.stdin.flush()
input = input[1000:]
i = (i + 1) % len(sources)
name, coll, pipe = sources[i]
try:
line = pipe.readline()
shell_logger.debug("%s> %r", name, line)
except EOFError:
shell_logger.debug("%s> Nothing returned.", name)
self.proc.poll()
returncode = self.proc.returncode
stdout = six.b("").join(stdout).decode(self.custom_encoding, "ignore")
stderr = six.b("").join(stderr).decode(self.custom_encoding, "ignore")
argv = self.argv.decode(self.custom_encoding, "ignore").split(";")[:1]
if returncode == 5:
raise IncorrectLogin(
argv, returncode, stdout, stderr,
message="Incorrect username or password provided")
elif returncode == 6:
raise HostPublicKeyUnknown(
argv, returncode, stdout, stderr,
message="The authenticity of the host can't be established")
elif returncode != 0:
raise SSHCommsError(
argv, returncode, stdout, stderr,
message="SSH communication failed")
elif name == "2":
raise SSHCommsChannel2Error(
argv, returncode, stdout, stderr,
message="No stderr result detected. Does the remote have Bash as the default shell?")
else:
raise SSHCommsError(
argv, returncode, stdout, stderr,
message="No communication channel detected. Does the remote exist?")
if not line:
del sources[i]
else:
coll.append(line)
if self.isatty:
stdout.pop(0) # discard first line of prompt
try:
self.returncode = int(stdout.pop(-1))
except (IndexError, ValueError):
self.returncode = "Unknown"
self._done = True
stdout = six.b("").join(stdout)
stderr = six.b("").join(stderr)
return stdout, stderr
class ShellSession(object):
"""An abstraction layer over *shell sessions*. A shell session is the execution of an
interactive shell (``/bin/sh`` or something compatible), over which you may run commands
    (sent over stdin). The output is then read from stdout and stderr. Shell sessions are
    less "robust" than executing a process on its own, and they are susceptible to all sorts
    of malformed-string attacks, and there is little benefit from using them locally.
However, they can greatly speed up remote connections, and are required for the implementation
of :class:`SshMachine <plumbum.machines.remote.SshMachine>`, as they allow us to send multiple
commands over a single SSH connection (setting up separate SSH connections incurs a high
overhead). Try to avoid using shell sessions, unless you know what you're doing.
Instances of this class may be used as *context-managers*.
:param proc: The underlying shell process (with open stdin, stdout and stderr)
:param encoding: The encoding to use for the shell session. If ``"auto"``, the underlying
process' encoding is used.
:param isatty: If true, assume the shell has a TTY and that stdout and stderr are unified
:param connect_timeout: The timeout to connect to the shell, after which, if no prompt
is seen, the shell process is killed
"""
def __init__(self, proc, encoding="auto", isatty=False, connect_timeout=5):
self.proc = proc
self.custom_encoding = proc.custom_encoding if encoding == "auto" else encoding
self.isatty = isatty
self._lock = threading.RLock()
self._current = None
if connect_timeout:
def closer():
shell_logger.error("Connection to %s timed out (%d sec)", proc,
connect_timeout)
self.close()
timer = threading.Timer(connect_timeout, closer)
timer.start()
try:
self.run("")
finally:
if connect_timeout:
timer.cancel()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def __del__(self):
try:
self.close()
except Exception:
pass
def alive(self):
"""Returns ``True`` if the underlying shell process is alive, ``False`` otherwise"""
return self.proc and self.proc.poll() is None
def close(self):
"""Closes (terminates) the shell session"""
if not self.alive():
return
try:
self.proc.stdin.write(six.b("\nexit\n\n\nexit\n\n"))
self.proc.stdin.flush()
time.sleep(0.05)
except (ValueError, EnvironmentError):
pass
for p in [self.proc.stdin, self.proc.stdout, self.proc.stderr]:
try:
p.close()
except Exception:
pass
try:
self.proc.kill()
except EnvironmentError:
pass
self.proc = None
def popen(self, cmd):
"""Runs the given command in the shell, adding some decoration around it. Only a single
command can be executed at any given time.
:param cmd: The command (string or :class:`Command <plumbum.commands.BaseCommand>` object)
to run
:returns: A :class:`SessionPopen <plumbum.session.SessionPopen>` instance
"""
if self.proc is None:
raise ShellSessionError("Shell session has already been closed")
if self._current and not self._current._done:
raise ShellSessionError(
"Each shell may start only one process at a time")
if isinstance(cmd, BaseCommand):
full_cmd = cmd.formulate(1)
else:
full_cmd = cmd
marker = "--.END%s.--" % (time.time() * random.random(), )
if full_cmd.strip():
full_cmd += " ; "
else:
full_cmd = "true ; "
full_cmd += "echo $? ; echo '%s'" % (marker, )
if not self.isatty:
full_cmd += " ; echo '%s' 1>&2" % (marker, )
if self.custom_encoding:
full_cmd = full_cmd.encode(self.custom_encoding)
shell_logger.debug("Running %r", full_cmd)
self.proc.stdin.write(full_cmd + six.b("\n"))
self.proc.stdin.flush()
self._current = SessionPopen(
self.proc, full_cmd, self.isatty, self.proc.stdin,
MarkedPipe(self.proc.stdout, marker),
MarkedPipe(self.proc.stderr, marker), self.custom_encoding)
return self._current
def run(self, cmd, retcode=0):
"""Runs the given command
:param cmd: The command (string or :class:`Command <plumbum.commands.BaseCommand>` object)
to run
:param retcode: The expected return code (0 by default). Set to ``None`` in order to
ignore erroneous return codes
:returns: A tuple of (return code, stdout, stderr)
"""
with self._lock:
return run_proc(self.popen(cmd), retcode)
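# --- Usage sketch (illustrative, not part of the module) ---------------------
# A minimal, hedged example of driving a shell session over a local shell,
# assuming plumbum's ``local.session()`` helper (which wraps an interactive
# ``sh`` process in a ShellSession). The remote case (SshMachine.session())
# works the same way, which is how many commands share one SSH connection.
#
#     from plumbum import local
#
#     with local.session() as session:
#         rc, out, err = session.run("echo hello")
#         assert rc == 0 and out.strip() == "hello"
#         rc, out, err = session.run("false", retcode=None)  # ignore exit code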
|
import os
from glob import glob
from io import StringIO
from django.core.management import call_command
from django.test import SimpleTestCase, TestCase
from django.test.utils import override_settings
from weblate.trans.tests.utils import TempDirMixin
class CommandTests(SimpleTestCase, TempDirMixin):
def setUp(self):
self.create_temp()
self.beat = os.path.join(self.tempdir, "beat")
self.beat_db = os.path.join(self.tempdir, "beat.db")
def tearDown(self):
self.remove_temp()
def check_beat(self):
self.assertTrue(glob(self.beat + "*"))
def test_none(self):
with override_settings(CELERY_BEAT_SCHEDULE_FILENAME=self.beat):
call_command("cleanup_celery")
self.check_beat()
def test_broken(self):
for name in (self.beat, self.beat_db):
with open(name, "wb") as handle:
handle.write(b"\x00")
with override_settings(CELERY_BEAT_SCHEDULE_FILENAME=self.beat):
call_command("cleanup_celery")
self.check_beat()
def test_queues(self):
output = StringIO()
call_command("celery_queues", stdout=output)
self.assertIn("celery:", output.getvalue())
class DBCommandTests(TestCase):
def test_stats(self):
output = StringIO()
call_command("ensure_stats", stdout=output)
self.assertEqual("", output.getvalue())
|
import csv
import io
import posixpath
import re
from absl import flags
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
IOR_DIR = '%s/ior' % linux_packages.INSTALL_DIR
GIT_REPO = 'https://github.com/hpc/ior'
GIT_TAG = '945fba2aa2d571e8babc4f5f01e78e9f5e6e193e'
_METADATA_KEYS = [
'Operation', '#Tasks', 'segcnt', 'blksiz', 'xsize', 'aggsize', 'API', 'fPP',
]
_MDTEST_RESULT_REGEX = (r'\s*(.*?)\s*:\s*(\d+\.\d+)\s*(\d+\.\d+)'
r'\s*(\d+\.\d+)\s*(\d+\.\d+)')
_MDTEST_SUMMARY_REGEX = r'(\d+) tasks, (\d+) files[^\n]*\n\s*\n(.*?)\n\s*\n'
def Install(vm):
"""Installs IOR on the VM."""
vm.Install('openmpi')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, IOR_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(IOR_DIR, GIT_TAG))
vm.RemoteCommand('cd {0} && ./bootstrap && ./configure && make && '
'sudo make install'.format(IOR_DIR))
def Uninstall(vm):
"""Uninstalls IOR on the VM."""
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(IOR_DIR))
def RunIOR(master_vm, num_tasks, script_path):
"""Runs IOR against the master VM."""
directory = master_vm.scratch_disks[0].mount_point
ior_cmd = (
'cd {directory} && '
'mpiexec -oversubscribe -machinefile ~/MACHINEFILE -n {num_tasks} '
'ior -f {script_path}'
).format(directory=directory, num_tasks=num_tasks, script_path=script_path)
stdout, _ = master_vm.RobustRemoteCommand(ior_cmd)
return ParseIORResults(stdout)
def ParseIORResults(test_output):
""""Parses the test output and returns samples."""
random_offsets = (ordering == 'random offsets' for ordering in
re.findall('ordering in a file = (.*)', test_output))
match = re.search(
'Summary of all tests:\n(.*?)Finished', test_output, re.DOTALL)
fp = io.StringIO(re.sub(' +', ' ', match.group(1)))
result_dicts = csv.DictReader(fp, delimiter=' ')
results = []
for result_dict in result_dicts:
metadata = {'random_offsets': next(random_offsets)}
for key in _METADATA_KEYS:
metadata[key] = result_dict[key]
bandwidth = float(result_dict['Mean(MiB)'])
iops = float(result_dict['Mean(OPs)'])
results.extend([
sample.Sample('Bandwidth', bandwidth, 'MiB/s', metadata),
sample.Sample('IOPS', iops, 'OPs/s', metadata)
])
return results
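# Illustration (hedged; the values below are made up): the block between
# "Summary of all tests:" and "Finished" is a whitespace-separated table.
# After runs of spaces are collapsed, csv.DictReader with delimiter=' ' turns
# each data row into a dict keyed by the header row, e.g.:
#
#   Operation Mean(MiB) Mean(OPs) #Tasks segcnt blksiz xsize aggsize API fPP ...
#   write     1024.00   1024.00   2      1      ...    ...   ...     POSIX 0
#
# result_dict['Mean(MiB)'] and result_dict['Mean(OPs)'] feed the Bandwidth and
# IOPS samples, and the _METADATA_KEYS columns become sample metadata.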
def RunMdtest(master_vm, num_tasks, mdtest_args):
"""Run mdtest against the master vm."""
directory = posixpath.join(master_vm.scratch_disks[0].mount_point, 'mdtest')
mdtest_cmd = (
'mpiexec -oversubscribe -machinefile MACHINEFILE -n {num_tasks} '
'mdtest -d {directory} {additional_args}'
).format(
directory=directory, num_tasks=num_tasks, additional_args=mdtest_args
)
stdout, _ = master_vm.RobustRemoteCommand(mdtest_cmd)
return ParseMdtestResults(stdout)
def ParseMdtestResults(test_output):
"""Parses the test output and returns samples."""
results = []
match = re.search('Command line used: (.*)', test_output)
command_line = match.group(1).strip()
dir_per_task = '-u' in command_line
summaries = re.findall(_MDTEST_SUMMARY_REGEX, test_output, re.DOTALL)
for num_tasks, num_files, summary in summaries:
metadata = {
'command_line': command_line, 'num_tasks': num_tasks,
'drop_caches': FLAGS.mdtest_drop_caches,
'num_files': num_files, 'dir_per_task': dir_per_task
}
result_lines = re.findall(_MDTEST_RESULT_REGEX, summary)
for result_line in result_lines:
op_type, max_ops, min_ops, mean_ops, std_dev = result_line
if not float(mean_ops):
continue
results.append(sample.Sample(
op_type + ' (Mean)', float(mean_ops), 'OPs/s', metadata))
if float(std_dev):
results.extend([
sample.Sample(
op_type + ' (Max)', float(max_ops), 'OPs/s', metadata),
sample.Sample(
op_type + ' (Min)', float(min_ops), 'OPs/s', metadata),
sample.Sample(
op_type + ' (Std Dev)', float(std_dev), 'OPs/s', metadata)
])
return results
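# Illustration (hedged; the numbers are made up): _MDTEST_SUMMARY_REGEX captures
# the "<N> tasks, <M> files" header plus the blank-line-delimited block below
# it, and _MDTEST_RESULT_REGEX splits each line of that block into
# (operation, max, min, mean, std dev). A line such as
#
#   Directory creation:   1234.567   890.123   1000.000   12.345
#
# yields op_type='Directory creation', max_ops='1234.567', min_ops='890.123',
# mean_ops='1000.000', std_dev='12.345'. Operations with a zero mean are
# skipped, and Max/Min/Std Dev samples are only emitted when std dev is nonzero.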
|
from collections import namedtuple
from datetime import timedelta
import logging
from pynetio import Netio
import voluptuous as vol
from homeassistant import util
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_START_DATE = "start_date"
ATTR_TOTAL_CONSUMPTION_KWH = "total_energy_kwh"
CONF_OUTLETS = "outlets"
DEFAULT_PORT = 1234
DEFAULT_USERNAME = "admin"
Device = namedtuple("device", ["netio", "entities"])
DEVICES = {}
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
REQ_CONF = [CONF_HOST, CONF_OUTLETS]
URL_API_NETIO_EP = "/api/netio/{host}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_OUTLETS): {cv.string: cv.string},
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Netio platform."""
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
if not DEVICES:
hass.http.register_view(NetioApiView)
dev = Netio(host, port, username, password)
DEVICES[host] = Device(dev, [])
# Throttle the update for all Netio switches of one Netio
dev.update = util.Throttle(MIN_TIME_BETWEEN_SCANS)(dev.update)
for key in config[CONF_OUTLETS]:
switch = NetioSwitch(DEVICES[host].netio, key, config[CONF_OUTLETS][key])
DEVICES[host].entities.append(switch)
add_entities(DEVICES[host].entities)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, dispose)
return True
def dispose(event):
"""Close connections to Netio Devices."""
for value in DEVICES.values():
value.netio.stop()
class NetioApiView(HomeAssistantView):
"""WSGI handler class."""
url = URL_API_NETIO_EP
name = "api:netio"
@callback
def get(self, request, host):
"""Request handler."""
data = request.query
states, consumptions, cumulated_consumptions, start_dates = [], [], [], []
for i in range(1, 5):
out = "output%d" % i
states.append(data.get("%s_state" % out) == STATE_ON)
consumptions.append(float(data.get("%s_consumption" % out, 0)))
cumulated_consumptions.append(
float(data.get("%s_cumulatedConsumption" % out, 0)) / 1000
)
start_dates.append(data.get("%s_consumptionStart" % out, ""))
_LOGGER.debug(
"%s: %s, %s, %s since %s",
host,
states,
consumptions,
cumulated_consumptions,
start_dates,
)
ndev = DEVICES[host].netio
ndev.consumptions = consumptions
ndev.cumulated_consumptions = cumulated_consumptions
ndev.states = states
ndev.start_dates = start_dates
for dev in DEVICES[host].entities:
dev.async_write_ha_state()
return self.json(True)
class NetioSwitch(SwitchEntity):
"""Provide a Netio linked switch."""
def __init__(self, netio, outlet, name):
"""Initialize the Netio switch."""
self._name = name
self.outlet = outlet
self.netio = netio
@property
def name(self):
"""Return the device's name."""
return self._name
@property
def available(self):
"""Return true if entity is available."""
return not hasattr(self, "telnet")
def turn_on(self, **kwargs):
"""Turn switch on."""
self._set(True)
def turn_off(self, **kwargs):
"""Turn switch off."""
self._set(False)
def _set(self, value):
val = list("uuuu")
val[int(self.outlet) - 1] = "1" if value else "0"
self.netio.get("port list %s" % "".join(val))
self.netio.states[int(self.outlet) - 1] = value
self.schedule_update_ha_state()
@property
def is_on(self):
"""Return the switch's status."""
return self.netio.states[int(self.outlet) - 1]
def update(self):
"""Update the state."""
self.netio.update()
@property
def state_attributes(self):
"""Return optional state attributes."""
return {
ATTR_TOTAL_CONSUMPTION_KWH: self.cumulated_consumption_kwh,
ATTR_START_DATE: self.start_date.split("|")[0],
}
@property
def current_power_w(self):
"""Return actual power."""
return self.netio.consumptions[int(self.outlet) - 1]
@property
def cumulated_consumption_kwh(self):
"""Return the total enerygy consumption since start_date."""
return self.netio.cumulated_consumptions[int(self.outlet) - 1]
@property
def start_date(self):
"""Point in time when the energy accumulation started."""
return self.netio.start_dates[int(self.outlet) - 1]
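# Example configuration (hedged sketch based on PLATFORM_SCHEMA above; the
# host, outlet numbers and names are placeholders):
#
#   # configuration.yaml
#   switch:
#     - platform: netio
#       host: 192.168.1.2
#       port: 1234
#       username: admin
#       password: secret
#       outlets:
#         "1": Lamp
#         "2": Router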
|
from typing import Dict
from typing import List
from typing import Tuple
from pymesos import MesosSchedulerDriver
from paasta_tools.frameworks.constraints import ConstraintState
from paasta_tools.frameworks.native_scheduler import LIVE_TASK_STATES
from paasta_tools.frameworks.native_scheduler import NativeScheduler
from paasta_tools.frameworks.native_service_config import TaskInfo
from paasta_tools.frameworks.native_service_config import UnknownNativeServiceError
class AdhocScheduler(NativeScheduler):
def __init__(self, *args, **kwargs):
self.dry_run = kwargs.pop("dry_run")
if kwargs.get("service_config_overrides") is None:
kwargs["service_config_overrides"] = {}
kwargs["service_config_overrides"].setdefault("instances", 1)
self.finished_countdown = kwargs["service_config_overrides"]["instances"]
super().__init__(*args, **kwargs)
def need_to_stop(self):
# Used to decide whether to stop the driver or try to start more tasks.
return self.finished_countdown == 0
def statusUpdate(self, driver: MesosSchedulerDriver, update: Dict):
super().statusUpdate(driver, update)
if update["state"] not in LIVE_TASK_STATES:
self.finished_countdown -= 1
# Stop if task ran and finished
if self.need_to_stop():
driver.stop()
def tasks_and_state_for_offer(
self, driver: MesosSchedulerDriver, offer, state: ConstraintState
) -> Tuple[List[TaskInfo], ConstraintState]:
# In dry-run mode, satisfy the exit condition after we get the offer
if self.dry_run or self.need_to_stop():
if self.dry_run:
tasks, _ = super().tasks_and_state_for_offer(driver, offer, state)
print("Would have launched: ", tasks)
driver.stop()
return [], state
return super().tasks_and_state_for_offer(driver, offer, state)
def kill_tasks_if_necessary(self, *args, **kwargs):
return
def validate_config(self):
if self.service_config.get_cmd() is None:
raise UnknownNativeServiceError("missing cmd in service config")
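# How the countdown above works (summary of the code, not new behaviour): the
# scheduler is asked for `instances` tasks (default 1). Each statusUpdate for
# a task that left LIVE_TASK_STATES decrements finished_countdown, and once it
# reaches zero need_to_stop() returns True and the driver is stopped. In
# dry-run mode the tasks that *would* have been launched are printed and the
# driver is stopped right after the first usable offer.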
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.architectures import abstract_arch
from compare_gan.architectures.arch_ops import batch_norm
from compare_gan.architectures.arch_ops import conv2d
from compare_gan.architectures.arch_ops import deconv2d
from compare_gan.architectures.arch_ops import linear
from compare_gan.architectures.arch_ops import lrelu
import tensorflow as tf
class Generator(abstract_arch.AbstractGenerator):
"""Generator architecture based on InfoGAN."""
def apply(self, z, y, is_training):
"""Build the generator network for the given inputs.
Args:
z: `Tensor` of shape [batch_size, z_dim] with latent code.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: boolean, are we in train or eval model.
Returns:
A tensor of size [batch_size] + self._image_shape with values in [0, 1].
"""
del y
h, w, c = self._image_shape
bs = z.shape.as_list()[0]
net = linear(z, 1024, scope="g_fc1")
net = lrelu(batch_norm(net, is_training=is_training, name="g_bn1"))
net = linear(net, 128 * (h // 4) * (w // 4), scope="g_fc2")
net = lrelu(batch_norm(net, is_training=is_training, name="g_bn2"))
net = tf.reshape(net, [bs, h // 4, w // 4, 128])
net = deconv2d(net, [bs, h // 2, w // 2, 64], 4, 4, 2, 2, name="g_dc3")
net = lrelu(batch_norm(net, is_training=is_training, name="g_bn3"))
net = deconv2d(net, [bs, h, w, c], 4, 4, 2, 2, name="g_dc4")
out = tf.nn.sigmoid(net)
return out
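# Shape walk-through (illustrative, assuming a 32x32x3 image shape and batch
# size bs): z [bs, z_dim] -> g_fc1 [bs, 1024] -> g_fc2 [bs, 128*8*8] ->
# reshape [bs, 8, 8, 128] -> g_dc3 [bs, 16, 16, 64] -> g_dc4 [bs, 32, 32, 3],
# followed by a sigmoid so the outputs lie in [0, 1].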
class Discriminator(abstract_arch.AbstractDiscriminator):
"""Discriminator architecture based on InfoGAN."""
def apply(self, x, y, is_training):
"""Apply the discriminator on a input.
Args:
x: `Tensor` of shape [batch_size, ?, ?, ?] with real or fake images.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: Boolean, whether the architecture should be constructed for
training or inference.
Returns:
Tuple of 3 Tensors, the final prediction of the discriminator, the logits
before the final output activation function and logits form the second
last layer.
"""
use_sn = self._spectral_norm
batch_size = x.shape.as_list()[0]
# Resulting shape: [bs, h/2, w/2, 64].
net = lrelu(conv2d(x, 64, 4, 4, 2, 2, name="d_conv1", use_sn=use_sn))
# Resulting shape: [bs, h/4, w/4, 128].
net = conv2d(net, 128, 4, 4, 2, 2, name="d_conv2", use_sn=use_sn)
net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn2")
net = lrelu(net)
# Resulting shape: [bs, h * w * 8].
net = tf.reshape(net, [batch_size, -1])
# Resulting shape: [bs, 1024].
net = linear(net, 1024, scope="d_fc3", use_sn=use_sn)
net = self.batch_norm(net, y=y, is_training=is_training, name="d_bn3")
net = lrelu(net)
# Resulting shape: [bs, 1].
out_logit = linear(net, 1, scope="d_fc4", use_sn=use_sn)
out = tf.nn.sigmoid(out_logit)
return out, out_logit, net
|
import numpy as np
import tensorflow as tf
from tensornetwork import (contract, connect, flatten_edges_between,
contract_between, Node)
import pytest
class GraphmodeTensorNetworkTest(tf.test.TestCase):
def test_basic_graphmode(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.ones(10), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
sess = tf.compat.v1.Session()
final_val = sess.run(final_tensor)
self.assertAllClose(final_val, 10.0)
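# Why 10.0: connecting the single edge of two rank-1 nodes and contracting it
# computes the inner product of two length-10 all-ones vectors, i.e. ten 1*1
# products summed.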
def test_gradient_decent(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.Variable(tf.ones(10)), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
opt = tf.compat.v1.train.GradientDescentOptimizer(0.001)
train_op = opt.minimize(final_tensor)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
self.assertAllClose(sess.run(final_tensor), 10.0)
sess.run(train_op)
self.assertLess(sess.run(final_tensor), 10.0)
def test_dynamic_network_sizes(self):
@tf.function
def f(x, n):
x_slice = x[:n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
e = connect(n1[0], n2[0])
return contract(e).get_tensor()
x = np.ones(10)
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 2.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 3.0)
@pytest.mark.skip(reason="Test fails due to probable bug in tensorflow 2.0.0")
def test_dynamic_network_sizes_contract_between(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract_between(n1, n2).get_tensor()
x = tf.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_standard(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract(flatten_edges_between(n1, n2)).get_tensor()
x = np.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_trace(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
connect(n1[0], n1[2])
connect(n1[1], n1[3])
return contract(flatten_edges_between(n1, n1)).get_tensor()
x = np.ones((3, 4, 3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), np.ones((2,)) * 12)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), np.ones((3,)) * 12)
def test_batch_usage(self):
def build_tensornetwork(tensors):
a = Node(tensors[0], backend="tensorflow")
b = Node(tensors[1], backend="tensorflow")
e = connect(a[0], b[0])
return contract(e).get_tensor()
tensors = [np.ones((5, 10)), np.ones((5, 10))]
result = tf.map_fn(build_tensornetwork, tensors, dtype=tf.float64)
np.testing.assert_allclose(result, np.ones(5) * 10)
if __name__ == '__main__':
tf.test.main()
|
from django.test.utils import override_settings
from django.urls import reverse
from weblate.lang.models import Language
from weblate.trans.tests.test_views import ViewTestCase
class AlertTest(ViewTestCase):
def create_component(self):
return self._create_component("po", "po-duplicates/*.dpo")
def test_duplicates(self):
self.assertEqual(
set(self.component.alert_set.values_list("name", flat=True)),
{
"DuplicateLanguage",
"DuplicateString",
"MissingLicense",
"BrokenBrowserURL",
"BrokenProjectURL",
},
)
alert = self.component.alert_set.get(name="DuplicateLanguage")
self.assertEqual(alert.details["occurrences"][0]["language_code"], "cs")
alert = self.component.alert_set.get(name="DuplicateString")
self.assertEqual(
alert.details["occurrences"][0]["source"], "Thank you for using Weblate."
)
def test_dismiss(self):
self.user.is_superuser = True
self.user.save()
response = self.client.post(
reverse("dismiss-alert", kwargs=self.kw_component),
{"dismiss": "BrokenBrowserURL"},
)
self.assertRedirects(response, self.component.get_absolute_url() + "#alerts")
self.assertTrue(self.component.alert_set.get(name="BrokenBrowserURL").dismissed)
def test_view(self):
response = self.client.get(self.component.get_absolute_url())
self.assertContains(response, "Duplicated translation")
def test_license(self):
def has_license_alert(component):
return component.alert_set.filter(name="MissingLicense").exists()
# No license and public project
component = self.component
component.update_alerts()
self.assertTrue(has_license_alert(component))
# Private project
component.project.access_control = component.project.ACCESS_PRIVATE
component.update_alerts()
self.assertFalse(has_license_alert(component))
# Public, but login required
component.project.access_control = component.project.ACCESS_PUBLIC
with override_settings(LOGIN_REQUIRED_URLS=["some"]):
component.update_alerts()
self.assertFalse(has_license_alert(component))
# Filtered licenses
with override_settings(LICENSE_FILTER=set()):
component.update_alerts()
self.assertFalse(has_license_alert(component))
# Filtered licenses
with override_settings(LICENSE_FILTER={"proprietary"}):
component.update_alerts()
self.assertTrue(has_license_alert(component))
# Set license
component.license = "license"
component.update_alerts()
self.assertFalse(has_license_alert(component))
def test_monolingual(self):
component = self.component
component.update_alerts()
self.assertFalse(
component.alert_set.filter(name="MonolingualTranslation").exists()
)
class LanguageAlertTest(ViewTestCase):
def create_component(self):
return self.create_po_new_base(new_lang="add")
def test_ambiguous_language(self):
component = self.component
self.assertFalse(component.alert_set.filter(name="AmbiguousLanguage").exists())
self.component.add_new_language(
Language.objects.get(code="ku"), self.get_request()
)
self.component.update_alerts()
self.assertTrue(component.alert_set.filter(name="AmbiguousLanguage").exists())
class MonolingualAlertTest(ViewTestCase):
def create_component(self):
return self.create_po_mono()
def test_monolingual(self):
self.assertFalse(
self.component.alert_set.filter(name="MonolingualTranslation").exists()
)
def test_false_bilingual(self):
component = self._create_component(
"po-mono", "po-mono/*.po", project=self.project, name="bimono"
)
self.assertTrue(
component.alert_set.filter(name="MonolingualTranslation").exists()
)
|
import logging
import blebox_uniapi
import pytest
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_TEMP,
ATTR_MIN_TEMP,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
STATE_UNKNOWN,
)
from .conftest import async_setup_entity, mock_feature
from tests.async_mock import AsyncMock, PropertyMock
@pytest.fixture(name="saunabox")
def saunabox_fixture():
"""Return a default climate entity mock."""
feature = mock_feature(
"climates",
blebox_uniapi.climate.Climate,
unique_id="BleBox-saunaBox-1afe34db9437-thermostat",
full_name="saunaBox-thermostat",
device_class=None,
is_on=None,
desired=None,
current=None,
min_temp=-54.3,
max_temp=124.3,
)
product = feature.product
type(product).name = PropertyMock(return_value="My sauna")
type(product).model = PropertyMock(return_value="saunaBox")
return (feature, "climate.saunabox_thermostat")
async def test_init(saunabox, hass, config):
"""Test default state."""
_, entity_id = saunabox
entry = await async_setup_entity(hass, config, entity_id)
assert entry.unique_id == "BleBox-saunaBox-1afe34db9437-thermostat"
state = hass.states.get(entity_id)
assert state.name == "saunaBox-thermostat"
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
assert supported_features & SUPPORT_TARGET_TEMPERATURE
assert state.attributes[ATTR_HVAC_MODES] == [HVAC_MODE_OFF, HVAC_MODE_HEAT]
assert ATTR_DEVICE_CLASS not in state.attributes
assert ATTR_HVAC_MODE not in state.attributes
assert ATTR_HVAC_ACTION not in state.attributes
assert state.attributes[ATTR_MIN_TEMP] == -54.3
assert state.attributes[ATTR_MAX_TEMP] == 124.3
assert state.attributes[ATTR_TEMPERATURE] is None
assert state.attributes[ATTR_CURRENT_TEMPERATURE] is None
assert state.state == STATE_UNKNOWN
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(entry.device_id)
assert device.name == "My sauna"
assert device.identifiers == {("blebox", "abcd0123ef5678")}
assert device.manufacturer == "BleBox"
assert device.model == "saunaBox"
assert device.sw_version == "1.23"
async def test_update(saunabox, hass, config):
"""Test updating."""
feature_mock, entity_id = saunabox
def initial_update():
feature_mock.is_on = False
feature_mock.desired = 64.3
feature_mock.current = 40.9
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert state.attributes[ATTR_TEMPERATURE] == 64.3
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 40.9
assert state.state == HVAC_MODE_OFF
async def test_on_when_below_desired(saunabox, hass, config):
"""Test when temperature is below desired."""
feature_mock, entity_id = saunabox
def initial_update():
feature_mock.is_on = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
def turn_on():
feature_mock.is_on = True
feature_mock.is_heating = True
feature_mock.desired = 64.8
feature_mock.current = 25.7
feature_mock.async_on = AsyncMock(side_effect=turn_on)
await hass.services.async_call(
"climate",
SERVICE_SET_HVAC_MODE,
{"entity_id": entity_id, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
feature_mock.async_off.assert_not_called()
state = hass.states.get(entity_id)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT
assert state.attributes[ATTR_TEMPERATURE] == 64.8
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 25.7
assert state.state == HVAC_MODE_HEAT
async def test_on_when_above_desired(saunabox, hass, config):
"""Test when temperature is below desired."""
feature_mock, entity_id = saunabox
def initial_update():
feature_mock.is_on = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
def turn_on():
feature_mock.is_on = True
feature_mock.is_heating = False
feature_mock.desired = 23.4
feature_mock.current = 28.7
feature_mock.async_on = AsyncMock(side_effect=turn_on)
await hass.services.async_call(
"climate",
SERVICE_SET_HVAC_MODE,
{"entity_id": entity_id, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
feature_mock.async_off.assert_not_called()
state = hass.states.get(entity_id)
assert state.attributes[ATTR_TEMPERATURE] == 23.4
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 28.7
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_IDLE
assert state.state == HVAC_MODE_HEAT
async def test_off(saunabox, hass, config):
"""Test turning off."""
feature_mock, entity_id = saunabox
def initial_update():
feature_mock.is_on = True
feature_mock.is_heating = False
feature_mock.async_update = AsyncMock(side_effect=initial_update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
def turn_off():
feature_mock.is_on = False
feature_mock.is_heating = False
feature_mock.desired = 29.8
feature_mock.current = 22.7
feature_mock.async_off = AsyncMock(side_effect=turn_off)
await hass.services.async_call(
"climate",
SERVICE_SET_HVAC_MODE,
{"entity_id": entity_id, ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
feature_mock.async_on.assert_not_called()
state = hass.states.get(entity_id)
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_OFF
assert state.attributes[ATTR_TEMPERATURE] == 29.8
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 22.7
assert state.state == HVAC_MODE_OFF
async def test_set_thermo(saunabox, hass, config):
"""Test setting thermostat."""
feature_mock, entity_id = saunabox
def update():
feature_mock.is_on = False
feature_mock.is_heating = False
feature_mock.async_update = AsyncMock(side_effect=update)
await async_setup_entity(hass, config, entity_id)
feature_mock.async_update = AsyncMock()
def set_temp(temp):
feature_mock.is_on = True
feature_mock.is_heating = True
feature_mock.desired = 29.2
feature_mock.current = 29.1
feature_mock.async_set_temperature = AsyncMock(side_effect=set_temp)
await hass.services.async_call(
"climate",
SERVICE_SET_TEMPERATURE,
{"entity_id": entity_id, ATTR_TEMPERATURE: 43.21},
blocking=True,
)
state = hass.states.get(entity_id)
assert state.attributes[ATTR_TEMPERATURE] == 29.2
assert state.attributes[ATTR_CURRENT_TEMPERATURE] == 29.1
assert state.attributes[ATTR_HVAC_ACTION] == CURRENT_HVAC_HEAT
assert state.state == HVAC_MODE_HEAT
async def test_update_failure(saunabox, hass, config, caplog):
"""Test that update failures are logged."""
caplog.set_level(logging.ERROR)
feature_mock, entity_id = saunabox
feature_mock.async_update = AsyncMock(side_effect=blebox_uniapi.error.ClientError)
await async_setup_entity(hass, config, entity_id)
assert f"Updating '{feature_mock.full_name}' failed: " in caplog.text
|
import datetime
import json
import logging
import os
import pipes
import posixpath
import subprocess
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_benchmarks import hbase_ycsb_benchmark as hbase_ycsb
from perfkitbenchmarker.linux_packages import hbase
from perfkitbenchmarker.linux_packages import ycsb
from perfkitbenchmarker.providers.gcp import gcp_bigtable
FLAGS = flags.FLAGS
HBASE_CLIENT_VERSION = '1.x'
BIGTABLE_CLIENT_VERSION = '1.4.0'
# TODO(user): remove the custom ycsb build once the head version of YCSB
# is updated to share Bigtable table object. The source code of the patched YCSB
# 0.14.0 can be found at 'https://storage.googleapis.com/cbt_ycsb_client_jar/'
# 'YCSB-0.14.0-Bigtable-table-object-sharing.zip'.
YCSB_BIGTABLE_TABLE_SHARING_TAR_URL = (
'https://storage.googleapis.com/cbt_ycsb_client_jar/ycsb-0.14.0.tar.gz')
flags.DEFINE_string('google_bigtable_endpoint', 'bigtable.googleapis.com',
'Google API endpoint for Cloud Bigtable.')
flags.DEFINE_string('google_bigtable_admin_endpoint',
'bigtableadmin.googleapis.com',
'Google API endpoint for Cloud Bigtable table '
'administration.')
flags.DEFINE_string('google_bigtable_instance_name', None,
'Bigtable instance name. If not specified, new instance '
'will be created and deleted on the fly.')
flags.DEFINE_string('google_bigtable_static_table_name', None,
'Bigtable table name. If not specified, a temporary table '
'will be created and deleted on the fly.')
flags.DEFINE_boolean('google_bigtable_enable_table_object_sharing', False,
'If true, will use a YCSB binary that shares the same '
'Bigtable table object across all the threads on a VM.')
flags.DEFINE_string(
'google_bigtable_hbase_jar_url',
'https://oss.sonatype.org/service/local/repositories/releases/content/'
'com/google/cloud/bigtable/bigtable-hbase-{0}-hadoop/'
'{1}/bigtable-hbase-{0}-hadoop-{1}.jar'.format(
HBASE_CLIENT_VERSION,
BIGTABLE_CLIENT_VERSION),
'URL for the Bigtable-HBase client JAR.')
flags.DEFINE_boolean('get_bigtable_cluster_cpu_utilization', False,
'If true, gather Bigtable cluster CPU utilization '
'for the duration of the performance test run stage and add '
'a sample with the data. This requires the '
'GOOGLE_APPLICATION_CREDENTIALS environment variable to be set as '
'described in '
'https://cloud.google.com/docs/authentication/'
'getting-started.')
BENCHMARK_NAME = 'cloud_bigtable_ycsb'
BENCHMARK_CONFIG = """
cloud_bigtable_ycsb:
description: >
Run YCSB against an existing Cloud Bigtable
instance. Configure the number of client VMs via --num_vms.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: null
flags:
gcloud_scopes: >
https://www.googleapis.com/auth/bigtable.admin
https://www.googleapis.com/auth/bigtable.data"""
# Starting from version 1.4.0, there is no need to install a separate BoringSSL
# via TCNATIVE_BORINGSSL_URL.
TCNATIVE_BORINGSSL_JAR = (
'netty-tcnative-boringssl-static-1.1.33.Fork13-linux-x86_64.jar')
TCNATIVE_BORINGSSL_URL = posixpath.join(
'https://search.maven.org/remotecontent?filepath='
'io/netty/netty-tcnative-boringssl-static/'
'1.1.33.Fork13/', TCNATIVE_BORINGSSL_JAR)
METRICS_CORE_JAR = 'metrics-core-3.1.2.jar'
DROPWIZARD_METRICS_CORE_URL = posixpath.join(
'https://search.maven.org/remotecontent?filepath='
'io/dropwizard/metrics/metrics-core/3.1.2/', METRICS_CORE_JAR)
HBASE_SITE = 'cloudbigtable/hbase-site.xml.j2'
HBASE_CONF_FILES = [HBASE_SITE]
HBASE_BINDING = 'hbase10-binding'
YCSB_HBASE_LIB = posixpath.join(ycsb.YCSB_DIR, HBASE_BINDING, 'lib')
YCSB_HBASE_CONF = posixpath.join(ycsb.YCSB_DIR, HBASE_BINDING, 'conf')
REQUIRED_SCOPES = (
'https://www.googleapis.com/auth/bigtable.admin',
'https://www.googleapis.com/auth/bigtable.data')
# TODO(connormccoy): Make table parameters configurable.
COLUMN_FAMILY = 'cf'
BENCHMARK_DATA = {
METRICS_CORE_JAR:
'245ba2a66a9bc710ce4db14711126e77bcb4e6d96ef7e622659280f3c90cbb5c',
TCNATIVE_BORINGSSL_JAR:
'027d87e77a08dedf2005d9333db49aa37e08d599aff64ea18da9893912bdf314'
}
BENCHMARK_DATA_URL = {
METRICS_CORE_JAR: DROPWIZARD_METRICS_CORE_URL,
TCNATIVE_BORINGSSL_JAR: TCNATIVE_BORINGSSL_URL
}
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Unused.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
del benchmark_config
for resource in HBASE_CONF_FILES:
data.ResourcePath(resource)
hbase.CheckPrerequisites()
ycsb.CheckPrerequisites()
for scope in REQUIRED_SCOPES:
if scope not in FLAGS.gcloud_scopes:
raise ValueError('Scope {0} required.'.format(scope))
# TODO: extract from gcloud config if available.
if FLAGS.google_bigtable_instance_name:
instance = _GetInstanceDescription(FLAGS.project or _GetDefaultProject(),
FLAGS.google_bigtable_instance_name)
if instance:
logging.info('Found instance: %s', instance)
else:
logging.info('No instance; will create in Prepare.')
def _GetInstanceDescription(project, instance_name):
"""Gets the description for a Cloud Bigtable instance.
Args:
project: str. Name of the project in which the instance was created.
instance_name: str. ID of the desired Bigtable instance.
Returns:
A dictionary containing an instance description.
Raises:
KeyError: when the instance was not found.
IOError: when the gcloud describe command fails.
"""
env = {'CLOUDSDK_CORE_DISABLE_PROMPTS': '1'}
env.update(os.environ)
cmd = [FLAGS.gcloud_path, 'beta', 'bigtable', 'instances', 'describe',
instance_name,
'--format', 'json',
'--project', project]
stdout, stderr, returncode = vm_util.IssueCommand(cmd, env=env)
if returncode:
raise IOError('Command "{0}" failed:\nSTDOUT:\n{1}\nSTDERR:\n{2}'.format(
' '.join(cmd), stdout, stderr))
return json.loads(stdout)
def _GetTableName():
return (FLAGS.google_bigtable_static_table_name or
'ycsb{0}'.format(FLAGS.run_uri))
def _GetDefaultProject():
cmd = [FLAGS.gcloud_path, 'config', 'list', '--format', 'json']
stdout, _, return_code = vm_util.IssueCommand(cmd)
if return_code:
raise subprocess.CalledProcessError(return_code, cmd, stdout)
config = json.loads(stdout)
try:
return config['core']['project']
except KeyError:
raise KeyError('No default project found in {0}'.format(config))
def _Install(vm):
"""Install YCSB and HBase on 'vm'."""
vm.Install('hbase')
vm.Install('ycsb')
vm.Install('curl')
instance_name = (FLAGS.google_bigtable_instance_name or
'pkb-bigtable-{0}'.format(FLAGS.run_uri))
hbase_lib = posixpath.join(hbase.HBASE_DIR, 'lib')
preprovisioned_pkgs = [TCNATIVE_BORINGSSL_JAR]
if 'hbase-1.x' in FLAGS.google_bigtable_hbase_jar_url:
preprovisioned_pkgs.append(METRICS_CORE_JAR)
vm.InstallPreprovisionedBenchmarkData(
BENCHMARK_NAME, preprovisioned_pkgs, YCSB_HBASE_LIB)
vm.InstallPreprovisionedBenchmarkData(
BENCHMARK_NAME, preprovisioned_pkgs, hbase_lib)
url = FLAGS.google_bigtable_hbase_jar_url
jar_name = os.path.basename(url)
jar_path = posixpath.join(YCSB_HBASE_LIB, jar_name)
vm.RemoteCommand('curl -Lo {0} {1}'.format(jar_path, url))
vm.RemoteCommand('cp {0} {1}'.format(jar_path, hbase_lib))
vm.RemoteCommand('echo "export JAVA_HOME=/usr" >> {0}/hbase-env.sh'.format(
hbase.HBASE_CONF_DIR))
context = {
'google_bigtable_endpoint': FLAGS.google_bigtable_endpoint,
'google_bigtable_admin_endpoint': FLAGS.google_bigtable_admin_endpoint,
'project': FLAGS.project or _GetDefaultProject(),
'instance': instance_name,
'hbase_version': HBASE_CLIENT_VERSION.replace('.', '_')
}
for file_name in HBASE_CONF_FILES:
file_path = data.ResourcePath(file_name)
remote_path = posixpath.join(hbase.HBASE_CONF_DIR,
os.path.basename(file_name))
if file_name.endswith('.j2'):
vm.RenderTemplate(file_path, os.path.splitext(remote_path)[0], context)
else:
vm.RemoteCopy(file_path, remote_path)
def MaxWithDefault(iterable, key, default):
"""Equivalent to max on python 3.4 or later."""
try:
return max(iterable, key=key)
except ValueError:
return default
def _GetCpuUtilizationSample(samples, instance_id):
"""Gets a list of cpu utilization samples - one per cluster.
Note that the utilization only covers the run stage.
Args:
samples: list of sample.Sample. Used to find the load and run samples for
computing the run time.
instance_id: the bigtable instance id.
Returns:
A list of sample.Sample instances describing the per-cluster CPU utilization.
Raises:
Exception: if the time for running can not be found or if
querying the cpu sampling fails.
"""
load_sample = MaxWithDefault(
(cur_sample for cur_sample in samples
if cur_sample.metadata.get('stage') == 'load'),
key=lambda sample: sample.timestamp,
default=None)
# get the last sample recorded in the run stage
last_run_sample = MaxWithDefault(
(cur_sample for cur_sample in samples
if cur_sample.metadata.get('stage') == 'run'),
key=lambda sample: sample.timestamp,
default=None)
if not load_sample or not last_run_sample:
raise Exception('Could not find the load or run sample, '
'so cannot get the time window for CPU utilization')
# pylint: disable=g-import-not-at-top
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import query
# Query the CPU utilization, which is reported as a gauged value for each
# minute in the time window.
client = monitoring_v3.MetricServiceClient()
start_timestamp = load_sample.timestamp
end_timestamp = last_run_sample.timestamp
samples = []
for metric in ['cpu_load', 'cpu_load_hottest_node']:
cpu_query = query.Query(
client, project=(FLAGS.project or _GetDefaultProject()),
metric_type='bigtable.googleapis.com/cluster/{}'.format(metric),
end_time=datetime.datetime.utcfromtimestamp(end_timestamp),
minutes=int((end_timestamp - start_timestamp) / 60))
cpu_query = cpu_query.select_resources(instance=instance_id)
time_series = list(cpu_query)
if not time_series:
raise Exception(
'Time series for computing {} could not be found.'.format(metric))
# Build the dict to be added to samples.
for cluster_number, cluster_time_series in enumerate(time_series):
utilization = [
round(point.value.double_value, 3)
for point in cluster_time_series.points]
metadata = {
'cluster_number': cluster_number,
'cpu_utilization_per_minute': utilization,
}
cpu_utilization_sample = sample.Sample(
'{}_array'.format(metric), -1, metric, metadata)
samples.append(cpu_utilization_sample)
return samples
def Prepare(benchmark_spec):
"""Prepare the virtual machines to run cloud bigtable.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
benchmark_spec.always_call_cleanup = True
vms = benchmark_spec.vms
if FLAGS.google_bigtable_enable_table_object_sharing:
ycsb.SetYcsbTarUrl(YCSB_BIGTABLE_TABLE_SHARING_TAR_URL)
# TODO: in the future, it might be nice to change this so that
# a gcp_bigtable.GcpBigtableInstance can be created with a
# flag that says don't create/delete the instance. That would
# reduce the code paths here.
if FLAGS.google_bigtable_instance_name is None:
instance_name = 'pkb-bigtable-{0}'.format(FLAGS.run_uri)
project = FLAGS.project or _GetDefaultProject()
logging.info('Creating bigtable instance %s', instance_name)
zone = FLAGS.google_bigtable_zone
benchmark_spec.bigtable_instance = gcp_bigtable.GcpBigtableInstance(
instance_name, project, zone)
benchmark_spec.bigtable_instance.Create()
vm_util.RunThreaded(_Install, vms)
table_name = _GetTableName()
# If the table already exists, this will be a no-op.
hbase_ycsb.CreateYCSBTable(vms[0], table_name=table_name, use_snappy=False,
limit_filesize=False)
# Add hbase conf dir to the classpath.
ycsb_memory = min(vms[0].total_memory_kb // 1024, 4096)
jvm_args = pipes.quote(' -Xmx{0}m'.format(ycsb_memory))
executor_flags = {
'cp': hbase.HBASE_CONF_DIR,
'jvm-args': jvm_args,
'table': table_name}
benchmark_spec.executor = ycsb.YCSBExecutor('hbase10', **executor_flags)
def Run(benchmark_spec):
"""Spawn YCSB and gather the results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample instances.
"""
vms = benchmark_spec.vms
metadata = {
'ycsb_client_vms': len(vms),
}
instance_name = 'pkb-bigtable-{0}'.format(FLAGS.run_uri)
if FLAGS.google_bigtable_instance_name:
instance_name = FLAGS.google_bigtable_instance_name
clusters = gcp_bigtable.GetClustersDecription(
instance_name, FLAGS.project or _GetDefaultProject())
metadata['bigtable_zone'] = [
cluster['zone'] for cluster in clusters]
metadata['bigtable_storage_type'] = [
cluster['defaultStorageType'] for cluster in clusters]
metadata['bigtable_node_count'] = [
cluster['serveNodes'] for cluster in clusters]
else:
metadata['bigtable_zone'] = FLAGS.google_bigtable_zone
metadata[
'bigtable_replication_zone'] = FLAGS.bigtable_replication_cluster_zone
metadata['bigtable_storage_type'] = FLAGS.bigtable_storage_type
metadata['bigtable_node_count'] = FLAGS.bigtable_node_count
metadata['bigtable_multicluster_routing'] = (
FLAGS.bigtable_multicluster_routing)
# By default YCSB uses a BufferedMutator for Puts / Deletes.
# This leads to incorrect update latencies, since the call returns
# before the request is acked by the server.
# Disable this behavior during the benchmark run.
run_kwargs = {
'columnfamily': COLUMN_FAMILY,
'clientbuffering': 'false'}
load_kwargs = run_kwargs.copy()
# During the load stage, use a buffered mutator with a single thread.
# The BufferedMutator will handle multiplexing RPCs.
load_kwargs['clientbuffering'] = 'true'
if not FLAGS['ycsb_preload_threads'].present:
load_kwargs['threads'] = 1
samples = list(benchmark_spec.executor.LoadAndRun(
vms, load_kwargs=load_kwargs, run_kwargs=run_kwargs))
# Optionally add new samples for cluster cpu utilization.
if FLAGS.get_bigtable_cluster_cpu_utilization:
cpu_utilization_samples = _GetCpuUtilizationSample(samples, instance_name)
samples.extend(cpu_utilization_samples)
for current_sample in samples:
current_sample.metadata.update(metadata)
return samples
def Cleanup(benchmark_spec):
"""Cleanup.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
# Delete table
if FLAGS.google_bigtable_instance_name is None:
benchmark_spec.bigtable_instance.Delete()
elif FLAGS.google_bigtable_static_table_name is None:
# Only need to drop the temporary tables if we're not deleting the instance.
vm = benchmark_spec.vms[0]
command = ("""echo 'disable "{0}"; drop "{0}"; exit' | """
"""{1}/hbase shell""").format(_GetTableName(), hbase.HBASE_BIN)
vm.RemoteCommand(command, should_log=True, ignore_failure=True)
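# Example invocation (hedged; pkb.py is PerfKit Benchmarker's entry point and
# the flag values below are placeholders):
#
#   ./pkb.py --benchmarks=cloud_bigtable_ycsb \
#       --google_bigtable_instance_name=my-instance \
#       --google_bigtable_static_table_name=my-table \
#       --get_bigtable_cluster_cpu_utilization
#
# Omitting --google_bigtable_instance_name makes Prepare() create (and
# Cleanup() delete) a temporary pkb-bigtable-<run_uri> instance instead.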
|
import unittest
from textwrap import dedent
import mock
from mock import Mock
from setup import Scripts
class TestMakeScript(unittest.TestCase):
def setUp(self):
self.make_file_executable = Mock()
self.write_file = Mock()
def capture(name, contents):
self.name = name
self.contents = contents
self.write_file.side_effect = capture
bindir = Scripts(
make_file_executable = self.make_file_executable,
write_file = self.write_file)
bindir.add_script('trash-put', 'trashcli.cmds', 'put')
def test_should_set_executable_permission(self):
self.make_file_executable.assert_called_with('trash-put')
def test_should_write_the_script(self):
self.write_file.assert_called_with( 'trash-put', mock.ANY)
def test_the_script_should_call_the_right_function_from_the_right_module(self):
args, kwargs = self.write_file.call_args
(_, contents) = args
expected = dedent("""\
#!/usr/bin/env python
from __future__ import absolute_import
import sys
from trashcli.cmds import put as main
sys.exit(main())
""")
assert expected == contents, ("Expected:\n---\n%s---\n"
"Actual :\n---\n%s---\n"
% (expected, contents))
class TestListOfCreatedScripts(unittest.TestCase):
def setUp(self):
self.bindir = Scripts(
make_file_executable = Mock(),
write_file = Mock())
def test_is_empty_on_start_up(self):
assert self.bindir.created_scripts == []
def test_collect_added_script(self):
self.bindir.add_script('foo-command', 'foo-module', 'main')
assert self.bindir.created_scripts == ['foo-command']
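# A minimal sketch (hedged; not the real setup.py) of a Scripts class that
# would satisfy these tests: it renders the wrapper template, delegates file
# I/O to the injected callables, and records the created script names.
#
#     SCRIPT_TEMPLATE = ('#!/usr/bin/env python\n'
#                        'from __future__ import absolute_import\n'
#                        'import sys\n'
#                        'from %(module)s import %(func)s as main\n'
#                        'sys.exit(main())\n')
#
#     class Scripts:
#         def __init__(self, make_file_executable, write_file):
#             self.make_file_executable = make_file_executable
#             self.write_file = write_file
#             self.created_scripts = []
#         def add_script(self, name, module, func):
#             contents = SCRIPT_TEMPLATE % {'module': module, 'func': func}
#             self.write_file(name, contents)
#             self.make_file_executable(name)
#             self.created_scripts.append(name)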
|
from flexx import flx
import os
BASE_DIR = os.getcwd()
with open(BASE_DIR + '/static/css/style.css') as f:
style = f.read()
with open(BASE_DIR + '/static/js/script.js') as f:
script = f.read()
flx.assets.associate_asset(__name__, 'style.css', style)
flx.assets.associate_asset(__name__, 'script.js', script)
class Main(flx.Widget):
def init(self):
flx.Widget(flex=1)
with flx.VBox():
with flx.HBox():
self.b1 = flx.Button(text='Hello', css_class="border-red", flex=1)
self.b2 = flx.Button(text='World', css_class="border-green", flex=1)
flx.Widget(flex=1)
if __name__ == '__main__':
m = flx.launch(Main)
flx.run()
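# Assumed project layout for the asset loading above (hedged; the CSS class
# names come from the buttons defined in Main):
#
#   ./static/css/style.css   e.g.  .border-red   { border: 2px solid red; }
#                                  .border-green { border: 2px solid green; }
#   ./static/js/script.js    any page-level JavaScript to inject
#
# flx.assets.associate_asset attaches both files to every page that uses this
# module, so the css_class values on the buttons pick up these styles.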
|
from datetime import timedelta
from celery.schedules import crontab
from django.conf import settings
from django.db.models import Count, Q
from django.utils import timezone
from django.utils.translation import gettext as _
from weblate.accounts.notifications import send_notification_email
from weblate.billing.models import Billing
from weblate.utils.celery import app
@app.task(trail=False)
def billing_check():
Billing.objects.check_limits()
@app.task(trail=False)
def billing_alert():
for bill in Billing.objects.filter(state=Billing.STATE_ACTIVE):
in_limit = bill.in_display_limits()
for project in bill.projects.iterator():
for component in project.component_set.iterator():
if in_limit:
component.delete_alert("BillingLimit")
else:
component.add_alert("BillingLimit")
@app.task(trail=False)
def billing_notify():
billing_check()
limit = Billing.objects.get_out_of_limits()
due = Billing.objects.get_unpaid()
with_project = Billing.objects.annotate(Count("projects")).filter(
projects__count__gt=0
)
toremove = with_project.exclude(removal=None).order_by("removal")
trial = with_project.filter(removal=None, state=Billing.STATE_TRIAL).order_by(
"expiry"
)
if limit or due or toremove or trial:
send_notification_email(
"en",
[a[1] for a in settings.ADMINS] + settings.ADMINS_BILLING,
"billing_check",
context={
"limit": limit,
"due": due,
"toremove": toremove,
"trial": trial,
},
)
@app.task(trail=False)
def notify_expired():
# Notify about expired billings
possible_billings = Billing.objects.filter(
# Active without payment (checked later)
Q(state=Billing.STATE_ACTIVE)
# Scheduled removal
| Q(removal__isnull=False)
# Trials expiring soon
| Q(state=Billing.STATE_TRIAL, expiry__lte=timezone.now() + timedelta(days=7))
).exclude(projects__isnull=True)
for bill in possible_billings:
if bill.state == Billing.STATE_ACTIVE and bill.check_payment_status(now=True):
continue
if bill.plan.price:
note = _(
"You will stop receiving this notification once "
"you pay the bills or the project is removed."
)
else:
note = _(
"You will stop receiving this notification once "
"you change to regular subscription or the project is removed."
)
for user in bill.get_notify_users():
send_notification_email(
user.profile.language,
[user.email],
"billing_expired",
context={
"billing": bill,
"payment_enabled": getattr(settings, "PAYMENT_ENABLED", False),
"unsubscribe_note": note,
},
info=bill,
)
@app.task(trail=False)
def schedule_removal():
removal = timezone.now() + timedelta(days=settings.BILLING_REMOVAL_PERIOD)
for bill in Billing.objects.filter(state=Billing.STATE_ACTIVE, removal=None):
if bill.check_payment_status():
continue
bill.removal = removal
bill.save(update_fields=["removal"])
@app.task(trail=False)
def perform_removal():
for bill in Billing.objects.filter(removal__lte=timezone.now()):
for user in bill.get_notify_users():
send_notification_email(
user.profile.language,
[user.email],
"billing_expired",
context={"billing": bill, "final_removal": True},
info=bill,
)
for prj in bill.projects.iterator():
prj.log_warning("removing due to unpaid billing")
prj.stats.invalidate()
prj.delete()
bill.removal = None
bill.state = Billing.STATE_TERMINATED
bill.save()
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(3600, billing_check.s(), name="billing-check")
sender.add_periodic_task(3600 * 24, billing_alert.s(), name="billing-alert")
sender.add_periodic_task(
crontab(hour=3, minute=0, day_of_week="monday,thursday"),
billing_notify.s(),
name="billing-notify",
)
sender.add_periodic_task(
crontab(hour=1, minute=0),
perform_removal.s(),
name="perform-removal",
)
sender.add_periodic_task(
crontab(hour=2, minute=0, day_of_week="monday,thursday"),
schedule_removal.s(),
name="schedule-removal",
)
sender.add_periodic_task(
crontab(hour=2, minute=30, day_of_week="monday,thursday"),
notify_expired.s(),
name="notify-expired",
)
|
from django.conf import settings
from django.core import mail
from django.urls import reverse
from weblate.auth.models import Group, User, get_anonymous
from weblate.lang.models import Language
from weblate.trans.models import Project
from weblate.trans.tests.test_views import FixtureTestCase
class ACLTest(FixtureTestCase):
def setUp(self):
super().setUp()
self.project.access_control = Project.ACCESS_PRIVATE
self.project.save()
self.access_url = reverse("manage-access", kwargs=self.kw_project)
self.translate_url = reverse("translate", kwargs=self.kw_translation)
self.second_user = User.objects.create_user(
"seconduser", "noreply@example.org", "testpassword"
)
self.admin_group = self.project.group_set.get(name__endswith="@Administration")
def add_acl(self):
"""Add user to ACL."""
self.project.add_user(self.user, "@Translate")
def test_acl_denied(self):
"""No access to the project without ACL."""
response = self.client.get(self.access_url)
self.assertEqual(response.status_code, 404)
self.assertFalse(get_anonymous().can_access_project(self.project))
def test_acl_disable(self):
"""Test disabling ACL."""
response = self.client.get(self.access_url)
self.assertEqual(response.status_code, 404)
self.project.access_control = Project.ACCESS_PUBLIC
self.project.save()
self.assertTrue(get_anonymous().can_access_project(self.project))
response = self.client.get(self.access_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(self.translate_url)
self.assertContains(response, 'type="submit" name="save"')
def test_acl_protected(self):
"""Test ACL protected project."""
response = self.client.get(self.access_url)
self.assertEqual(response.status_code, 404)
self.project.access_control = Project.ACCESS_PROTECTED
self.project.save()
self.assertTrue(get_anonymous().can_access_project(self.project))
response = self.client.get(self.access_url)
self.assertEqual(response.status_code, 403)
response = self.client.get(self.translate_url)
self.assertContains(
response, "Insufficient privileges for saving translations."
)
def test_acl(self):
"""Regular user should not have access to user management."""
self.add_acl()
response = self.client.get(self.access_url)
self.assertEqual(response.status_code, 403)
def test_edit_acl(self):
"""Manager should have access to user management."""
self.add_acl()
self.make_manager()
response = self.client.get(self.access_url)
self.assertContains(response, "Users")
def test_edit_acl_owner(self):
"""Owner should have access to user management."""
self.add_acl()
self.project.add_user(self.user, "@Administration")
response = self.client.get(self.access_url)
self.assertContains(response, "Users")
def add_user(self):
self.add_acl()
self.project.add_user(self.user, "@Administration")
# Add user
response = self.client.post(
reverse("add-user", kwargs=self.kw_project),
{"user": self.second_user.username},
)
self.assertRedirects(response, self.access_url)
# Ensure user is now listed
response = self.client.get(self.access_url)
self.assertContains(response, self.second_user.username)
self.assertContains(response, self.second_user.email)
def test_invite_invalid(self):
"""Test inviting invalid form."""
self.project.add_user(self.user, "@Administration")
response = self.client.post(
reverse("invite-user", kwargs=self.kw_project),
{"email": "invalid", "username": "valid", "full_name": "name"},
follow=True,
)
# This error comes from Django validation
self.assertContains(response, "Enter a valid email addres")
def test_invite_existing(self):
"""Test inviting existing user."""
self.project.add_user(self.user, "@Administration")
response = self.client.post(
reverse("invite-user", kwargs=self.kw_project),
{
"email": self.user.email,
"username": self.user.username,
"full_name": "name",
},
follow=True,
)
self.assertContains(response, "User with this E-mail already exists")
def test_invite_user(self):
"""Test inviting user."""
self.project.add_user(self.user, "@Administration")
response = self.client.post(
reverse("invite-user", kwargs=self.kw_project),
{"email": "user@example.com", "username": "username", "full_name": "name"},
follow=True,
)
# Ensure user is now listed
self.assertContains(response, "user@example.com")
# Check invitation mail
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.subject, "[Weblate] Invitation to Weblate")
mail.outbox = []
# Resend invitation
response = self.client.post(
reverse("resend_invitation", kwargs=self.kw_project),
{"user": "user@example.com"},
follow=True,
)
# Check invitation mail
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.subject, "[Weblate] Invitation to Weblate")
def remove_user(self):
# Remove user
response = self.client.post(
reverse("delete-user", kwargs=self.kw_project),
{"user": self.second_user.username},
)
self.assertRedirects(response, self.access_url)
# Ensure user is now not listed
response = self.client.get(self.access_url)
self.assertNotContains(response, self.second_user.username)
self.assertNotContains(response, self.second_user.email)
def test_add_acl(self):
"""Adding and removing user from the ACL project."""
self.add_user()
self.remove_user()
def test_add_owner(self):
"""Adding and removing owner from the ACL project."""
self.add_user()
self.client.post(
reverse("set-groups", kwargs=self.kw_project),
{
"user": self.second_user.username,
"group": self.admin_group.pk,
"action": "add",
},
)
self.assertTrue(
User.objects.all_admins(self.project)
.filter(username=self.second_user.username)
.exists()
)
self.client.post(
reverse("set-groups", kwargs=self.kw_project),
{
"user": self.second_user.username,
"group": self.admin_group.pk,
"action": "remove",
},
)
self.assertFalse(
User.objects.all_admins(self.project)
.filter(username=self.second_user.username)
.exists()
)
self.remove_user()
def test_delete_owner(self):
"""Adding and deleting owner from the ACL project."""
self.add_user()
self.client.post(
reverse("set-groups", kwargs=self.kw_project),
{
"user": self.second_user.username,
"group": self.admin_group.pk,
"action": "add",
},
)
self.remove_user()
self.assertFalse(
User.objects.all_admins(self.project)
.filter(username=self.second_user.username)
.exists()
)
def test_denied_owner_delete(self):
"""Test that deleting last owner does not work."""
self.project.add_user(self.user, "@Administration")
self.client.post(
reverse("set-groups", kwargs=self.kw_project),
{
"user": self.second_user.username,
"group": self.admin_group.pk,
"action": "remove",
},
)
self.assertTrue(
User.objects.all_admins(self.project)
.filter(username=self.user.username)
.exists()
)
self.client.post(
reverse("set-groups", kwargs=self.kw_project),
{
"user": self.user.username,
"group": self.admin_group.pk,
"action": "remove",
},
)
self.assertTrue(
User.objects.all_admins(self.project)
.filter(username=self.user.username)
.exists()
)
def test_nonexisting_user(self):
"""Test adding non existing user."""
self.project.add_user(self.user, "@Administration")
response = self.client.post(
reverse("add-user", kwargs=self.kw_project),
{"user": "nonexisting"},
follow=True,
)
self.assertContains(response, "No matching user found.")
def test_acl_groups(self):
"""Test handling of ACL groups."""
if "weblate.billing" in settings.INSTALLED_APPS:
billing_group = 1
else:
billing_group = 0
match = f"{self.project.name}@"
self.project.access_control = Project.ACCESS_PUBLIC
self.project.translation_review = False
self.project.save()
self.assertEqual(1, Group.objects.filter(name__startswith=match).count())
self.project.access_control = Project.ACCESS_PROTECTED
self.project.translation_review = True
self.project.save()
self.assertEqual(
9 + billing_group, Group.objects.filter(name__startswith=match).count()
)
self.project.access_control = Project.ACCESS_PRIVATE
self.project.translation_review = True
self.project.save()
self.assertEqual(
9 + billing_group, Group.objects.filter(name__startswith=match).count()
)
self.project.access_control = Project.ACCESS_CUSTOM
self.project.save()
self.assertEqual(0, Group.objects.filter(name__startswith=match).count())
self.project.access_control = Project.ACCESS_CUSTOM
self.project.save()
self.assertEqual(0, Group.objects.filter(name__startswith=match).count())
self.project.access_control = Project.ACCESS_PRIVATE
self.project.save()
self.assertEqual(
9 + billing_group, Group.objects.filter(name__startswith=match).count()
)
self.project.delete()
self.assertEqual(0, Group.objects.filter(name__startswith=match).count())
def test_restricted_component(self):
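        """Restricting a component should hide it from the dashboard and make it inaccessible."""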
# Make the project public
self.project.access_control = Project.ACCESS_PUBLIC
self.project.save()
# Add user language to ensure the suggestions are shown
self.user.profile.languages.add(Language.objects.get(code="cs"))
url = self.component.get_absolute_url()
# It is shown on the dashboard and accessible
self.assertEqual(self.client.get(url).status_code, 200)
self.assertContains(self.client.get(reverse("home")), url)
# Make it restricted
self.component.restricted = True
self.component.save(update_fields=["restricted"])
# It is no longer shown on the dashboard and not accessible
self.assertEqual(self.client.get(url).status_code, 404)
self.assertNotContains(self.client.get(reverse("home")), url)
|
import asyncio
import logging
from omnilogic import LoginException, OmniLogic, OmniLogicException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from .common import OmniLogicUpdateCoordinator
from .const import CONF_SCAN_INTERVAL, COORDINATOR, DOMAIN, OMNI_API
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Omnilogic component."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Omnilogic from a config entry."""
conf = entry.data
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
polling_interval = 6
if CONF_SCAN_INTERVAL in conf:
polling_interval = conf[CONF_SCAN_INTERVAL]
session = aiohttp_client.async_get_clientsession(hass)
api = OmniLogic(username, password, session)
try:
await api.connect()
await api.get_telemetry_data()
except LoginException as error:
_LOGGER.error("Login Failed: %s", error)
return False
except OmniLogicException as error:
_LOGGER.debug("OmniLogic API error: %s", error)
raise ConfigEntryNotReady from error
coordinator = OmniLogicUpdateCoordinator(
hass=hass,
api=api,
name="Omnilogic",
polling_interval=polling_interval,
)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = {
COORDINATOR: coordinator,
OMNI_API: api,
}
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
import asyncio
from datetime import timedelta
import logging
from PyTado.interface import Tado
from requests import RequestException
import requests.exceptions
import voluptuous as vol
from homeassistant.components.climate.const import PRESET_AWAY, PRESET_HOME
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import Throttle
from .const import (
CONF_FALLBACK,
DATA,
DOMAIN,
SIGNAL_TADO_UPDATE_RECEIVED,
UPDATE_LISTENER,
UPDATE_TRACK,
)
_LOGGER = logging.getLogger(__name__)
TADO_COMPONENTS = ["sensor", "climate", "water_heater"]
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=10)
SCAN_INTERVAL = timedelta(seconds=15)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_FALLBACK, default=True): cv.boolean,
}
],
)
},
extra=vol.ALLOW_EXTRA,
)
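# Example configuration.yaml entry accepted by the schema above (values are
# placeholders; assumes DOMAIN resolves to "tado"):
#
#     tado:
#       - username: "user@example.com"
#         password: "!secret tado_password"
#         fallback: true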
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Tado component."""
hass.data.setdefault(DOMAIN, {})
if DOMAIN not in config:
return True
for conf in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=conf,
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Tado from a config entry."""
_async_import_options_from_data_if_missing(hass, entry)
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
fallback = entry.options.get(CONF_FALLBACK, True)
tadoconnector = TadoConnector(hass, username, password, fallback)
try:
await hass.async_add_executor_job(tadoconnector.setup)
except KeyError:
_LOGGER.error("Failed to login to tado")
return False
except RuntimeError as exc:
_LOGGER.error("Failed to setup tado: %s", exc)
return ConfigEntryNotReady
except requests.exceptions.Timeout as ex:
raise ConfigEntryNotReady from ex
except requests.exceptions.HTTPError as ex:
        if 400 < ex.response.status_code < 500:
            _LOGGER.error("Failed to log in to Tado: %s", ex)
return False
raise ConfigEntryNotReady from ex
# Do first update
await hass.async_add_executor_job(tadoconnector.update)
# Poll for updates in the background
update_track = async_track_time_interval(
hass,
lambda now: tadoconnector.update(),
SCAN_INTERVAL,
)
update_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
DATA: tadoconnector,
UPDATE_TRACK: update_track,
UPDATE_LISTENER: update_listener,
}
for component in TADO_COMPONENTS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
options = dict(entry.options)
if CONF_FALLBACK not in options:
options[CONF_FALLBACK] = entry.data.get(CONF_FALLBACK, True)
hass.config_entries.async_update_entry(entry, options=options)
async def _async_update_listener(hass: HomeAssistant, entry: ConfigEntry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in TADO_COMPONENTS
]
)
)
hass.data[DOMAIN][entry.entry_id][UPDATE_TRACK]()
hass.data[DOMAIN][entry.entry_id][UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class TadoConnector:
"""An object to store the Tado data."""
def __init__(self, hass, username, password, fallback):
"""Initialize Tado Connector."""
self.hass = hass
self._username = username
self._password = password
self._fallback = fallback
self.device_id = None
self.tado = None
self.zones = None
self.devices = None
self.data = {
"zone": {},
"device": {},
}
@property
def fallback(self):
"""Return fallback flag to Smart Schedule."""
return self._fallback
def setup(self):
"""Connect to Tado and fetch the zones."""
self.tado = Tado(self._username, self._password)
self.tado.setDebugging(True)
# Load zones and devices
self.zones = self.tado.getZones()
self.devices = self.tado.getMe()["homes"]
self.device_id = self.devices[0]["id"]
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update the registered zones."""
for zone in self.zones:
self.update_sensor("zone", zone["id"])
for device in self.devices:
self.update_sensor("device", device["id"])
def update_sensor(self, sensor_type, sensor):
"""Update the internal data from Tado."""
_LOGGER.debug("Updating %s %s", sensor_type, sensor)
try:
if sensor_type == "zone":
data = self.tado.getZoneState(sensor)
elif sensor_type == "device":
devices_data = self.tado.getDevices()
if not devices_data:
_LOGGER.info("There are no devices to setup on this tado account")
return
data = devices_data[0]
else:
_LOGGER.debug("Unknown sensor: %s", sensor_type)
return
except RuntimeError:
_LOGGER.error(
"Unable to connect to Tado while updating %s %s",
sensor_type,
sensor,
)
return
self.data[sensor_type][sensor] = data
_LOGGER.debug(
"Dispatching update to %s %s %s: %s",
self.device_id,
sensor_type,
sensor,
data,
)
dispatcher_send(
self.hass,
SIGNAL_TADO_UPDATE_RECEIVED.format(self.device_id, sensor_type, sensor),
)
def get_capabilities(self, zone_id):
"""Return the capabilities of the devices."""
return self.tado.getCapabilities(zone_id)
def reset_zone_overlay(self, zone_id):
"""Reset the zone back to the default operation."""
self.tado.resetZoneOverlay(zone_id)
self.update_sensor("zone", zone_id)
def set_presence(
self,
presence=PRESET_HOME,
):
"""Set the presence to home or away."""
if presence == PRESET_AWAY:
self.tado.setAway()
elif presence == PRESET_HOME:
self.tado.setHome()
def set_zone_overlay(
self,
zone_id=None,
overlay_mode=None,
temperature=None,
duration=None,
device_type="HEATING",
mode=None,
fan_speed=None,
swing=None,
):
"""Set a zone overlay."""
_LOGGER.debug(
"Set overlay for zone %s: overlay_mode=%s, temp=%s, duration=%s, type=%s, mode=%s fan_speed=%s swing=%s",
zone_id,
overlay_mode,
temperature,
duration,
device_type,
mode,
fan_speed,
swing,
)
try:
self.tado.setZoneOverlay(
zone_id,
overlay_mode,
temperature,
duration,
device_type,
"ON",
mode,
fanSpeed=fan_speed,
swing=swing,
)
except RequestException as exc:
_LOGGER.error("Could not set zone overlay: %s", exc)
self.update_sensor("zone", zone_id)
def set_zone_off(self, zone_id, overlay_mode, device_type="HEATING"):
"""Set a zone to off."""
try:
self.tado.setZoneOverlay(
zone_id, overlay_mode, None, None, device_type, "OFF"
)
except RequestException as exc:
_LOGGER.error("Could not set zone overlay: %s", exc)
self.update_sensor("zone", zone_id)
|
__version__ = "4.6.2"
def get_include():
"""
Returns a list of header include paths (for lxml itself, libxml2
and libxslt) needed to compile C code against lxml if it was built
with statically linked libraries.
"""
import os
lxml_path = __path__[0]
include_path = os.path.join(lxml_path, 'includes')
includes = [include_path, lxml_path]
for name in os.listdir(include_path):
path = os.path.join(include_path, name)
if os.path.isdir(path):
includes.append(path)
return includes
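# Illustrative use from a setup.py (not part of this module; the extension name
# and source file are assumptions):
#
#     from setuptools import Extension, setup
#     import lxml
#
#     ext = Extension(
#         "my_lxml_ext",
#         sources=["my_lxml_ext.c"],
#         include_dirs=lxml.get_include(),
#     )
#     setup(name="my_lxml_ext", ext_modules=[ext])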
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
from absl import flags
from perfkitbenchmarker import non_relational_db
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
from six.moves import range
FLAGS = flags.FLAGS
flags.DEFINE_string('aws_dynamodb_primarykey',
                    'primary_key',
                    'The primary key of the dynamodb table. '
                    'This switches to sortkey if using sort. '
                    'If testing GSI/LSI, use the range keyname '
                    'of the index you want to test.')
flags.DEFINE_boolean('aws_dynamodb_use_sort',
                     False,
                     'Determines whether to use a sort key or not.')
flags.DEFINE_string('aws_dynamodb_sortkey',
                    'sort_key',
                    'The sort key of the dynamodb table. '
                    'This switches to primarykey if using sort. '
                    'If testing GSI/LSI, use the primary keyname '
                    'of the index you want to test.')
flags.DEFINE_enum('aws_dynamodb_attributetype',
                  'S', ['S', 'N', 'B'],
                  'The type of attribute, defaults to S (String). '
                  'Alternates are N (Number) and B (Binary).')
flags.DEFINE_integer('aws_dynamodb_read_capacity',
                     5,
                     'Set RCU for the dynamodb table.')
flags.DEFINE_integer('aws_dynamodb_write_capacity',
                     5,
                     'Set WCU for the dynamodb table.')
flags.DEFINE_integer('aws_dynamodb_lsi_count',
                     0, 'Number of Local Secondary Indexes. Only set 0-5.')
flags.register_validator('aws_dynamodb_lsi_count',
                         lambda value: -1 < value < 6,
                         message='--aws_dynamodb_lsi_count must be from 0-5.')
flags.register_validator('aws_dynamodb_use_sort',
                         lambda sort: sort or not FLAGS.aws_dynamodb_lsi_count,
                         message='--aws_dynamodb_lsi_count requires a sort key.')
flags.DEFINE_integer('aws_dynamodb_gsi_count',
                     0, 'Number of Global Secondary Indexes. Only set 0-5.')
flags.register_validator('aws_dynamodb_gsi_count',
                         lambda value: -1 < value < 6,
                         message='--aws_dynamodb_gsi_count must be from 0-5.')
flags.DEFINE_boolean('aws_dynamodb_ycsb_consistentReads',
                     False,
                     'Whether to use strongly consistent reads. Consistent '
                     'reads cost twice as much as eventual reads, which are '
                     'the default.')
flags.DEFINE_integer('aws_dynamodb_connectMax', 50,
                     'Maximum number of concurrent dynamodb connections. '
                     'Defaults to 50.')
class _GetIndexes():
"""Used to create secondary indexes."""
def __init__(self):
self.lsi_count = FLAGS.aws_dynamodb_lsi_count
self.gsi_count = FLAGS.aws_dynamodb_gsi_count
def CreateLocalSecondaryIndex(self):
"""Used to create local secondary indexes."""
lsi_items = []
lsi_entry = []
attr_list = []
for lsi in range(0, self.lsi_count):
lsi_item = ('{{"IndexName": "lsiidx{0}",'
'"KeySchema": [{{'
'"AttributeName": "{1}",'
'"KeyType": "HASH"}},{{'
'"AttributeName": "lattr{2}",'
'"KeyType": "RANGE"}}],'
'"Projection": {{'
'"ProjectionType": "KEYS_ONLY"}}}}'.format(
str(lsi),
FLAGS.aws_dynamodb_primarykey,
str(lsi)))
lsi_entry.append(lsi_item)
attr_list.append('{{"AttributeName": "lattr{0}","AttributeType": "{1}"}}'
.format(str(lsi), FLAGS.aws_dynamodb_attributetype))
lsi_items.append('[' + ','.join(lsi_entry) + ']')
lsi_items.append(','.join(attr_list))
return lsi_items
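  # Illustrative return value for --aws_dynamodb_lsi_count=1 with the default
  # flag values above (element 0 feeds --local-secondary-indexes, element 1 is
  # appended to --attribute-definitions in AwsDynamoDBInstance._Create):
  #   ['[{"IndexName": "lsiidx0","KeySchema": [...],"Projection": {...}}]',
  #    '{"AttributeName": "lattr0","AttributeType": "S"}']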
def CreateGlobalSecondaryIndex(self):
"""Used to create global secondary indexes."""
gsi_items = []
gsi_entry = []
attr_list = []
for gsi in range(0, self.gsi_count):
gsi_item = ('{{"IndexName": "gsiidx{0}",'
'"KeySchema": [{{'
'"AttributeName": "gsikey{1}",'
'"KeyType": "HASH"}},{{'
'"AttributeName": "gattr{2}",'
'"KeyType": "RANGE"}}],'
'"Projection": {{'
'"ProjectionType": "KEYS_ONLY"}},'
'"ProvisionedThroughput": {{'
'"ReadCapacityUnits": {3},'
'"WriteCapacityUnits": {4}}}}}'.format(str(gsi),
str(gsi),
str(gsi),
5, 5))
gsi_entry.append(gsi_item)
attr_list.append('{{"AttributeName": "gattr{0}","AttributeType": "{1}"}}'
.format(str(gsi), FLAGS.aws_dynamodb_attributetype))
attr_list.append('{{"AttributeName": "gsikey{0}","AttributeType": "{1}"}}'
.format(str(gsi), FLAGS.aws_dynamodb_attributetype))
gsi_items.append('[' + ','.join(gsi_entry) + ']')
gsi_items.append(','.join(attr_list))
return gsi_items
class AwsDynamoDBInstance(non_relational_db.BaseNonRelationalDb):
"""Class for working with DynamoDB."""
SERVICE_TYPE = non_relational_db.DYNAMODB
def __init__(self, table_name, **kwargs):
super(AwsDynamoDBInstance, self).__init__(**kwargs)
self.zone = FLAGS.zones[0] if FLAGS.zones else FLAGS.zone[0]
self.region = util.GetRegionFromZone(self.zone)
self.primary_key = ('{{\"AttributeName\": \"{0}\",\"KeyType\": \"HASH\"}}'
.format(FLAGS.aws_dynamodb_primarykey))
self.sort_key = ('{{\"AttributeName\": \"{0}\",\"KeyType\": \"RANGE\"}}'
.format(FLAGS.aws_dynamodb_sortkey))
self.part_attributes = ('{{\"AttributeName\": \"{0}\",'
'\"AttributeType\": \"{1}\"}}'
.format(FLAGS.aws_dynamodb_primarykey,
FLAGS.aws_dynamodb_attributetype))
self.sort_attributes = ('{{\"AttributeName\": \"{0}\",'
'\"AttributeType\": \"{1}\"}}'
.format(FLAGS.aws_dynamodb_sortkey,
FLAGS.aws_dynamodb_attributetype))
self.table_name = table_name
self.throughput = 'ReadCapacityUnits={read},WriteCapacityUnits={write}'.format(
read=FLAGS.aws_dynamodb_read_capacity,
write=FLAGS.aws_dynamodb_write_capacity)
self.lsi_indexes = _GetIndexes().CreateLocalSecondaryIndex()
self.gsi_indexes = _GetIndexes().CreateGlobalSecondaryIndex()
def _Create(self):
"""Creates the dynamodb table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'create-table',
'--region', self.region,
'--table-name', self.table_name,
'--attribute-definitions', self.part_attributes,
'--key-schema', self.primary_key,
'--provisioned-throughput', self.throughput,
'--tags'] + util.MakeFormattedDefaultTags()
if FLAGS.aws_dynamodb_lsi_count > 0 and FLAGS.aws_dynamodb_use_sort:
cmd[10] = (
'[' + self.part_attributes + ', ' + self.sort_attributes + ', ' +
self.lsi_indexes[1] + ']')
logging.info('adding to --attribute definitions')
cmd.append('--local-secondary-indexes')
cmd.append(self.lsi_indexes[0])
cmd[12] = ('[' + self.primary_key + ', ' + self.sort_key + ']')
logging.info('adding to --key-schema')
elif FLAGS.aws_dynamodb_use_sort:
cmd[10] = ('[' + self.part_attributes + ', ' + self.sort_attributes + ']')
logging.info('adding to --attribute definitions')
cmd[12] = ('[' + self.primary_key + ', ' + self.sort_key + ']')
logging.info('adding to --key-schema')
if FLAGS.aws_dynamodb_gsi_count > 0:
cmd[10] = cmd[10][:-1]
cmd[10] += (', ' + self.gsi_indexes[1] + ']')
logging.info('adding to --attribute definitions')
cmd.append('--global-secondary-indexes')
cmd.append(self.gsi_indexes[0])
    _, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
    if retcode != 0:
      logging.warning('Failed to create table! %s', stderr)
def _Delete(self):
"""Deletes the dynamodb table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'delete-table',
'--region', self.region,
'--table-name', self.table_name]
logging.info('Attempting deletion: ')
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _IsReady(self):
"""Check if dynamodb table is ready."""
logging.info('Getting table ready status for %s', self.table_name)
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--region', self.region,
'--table-name', self.table_name]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
return result['Table']['TableStatus'] == 'ACTIVE'
def _Exists(self):
"""Returns true if the dynamodb table exists."""
logging.info('Checking if table %s exists', self.table_name)
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--region', self.region,
'--table-name', self.table_name]
_, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
return False
else:
return True
def _DescribeTable(self):
"""Calls describe on dynamodb table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--region', self.region,
'--table-name', self.table_name]
stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
logging.info('Could not find table %s, %s', self.table_name, stderr)
return {}
    table_info = json.loads(stdout)['Table']
    if table_info['TableName'] == self.table_name:
      return table_info
    return {}
def GetEndPoint(self):
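    """Returns the regional DynamoDB endpoint URL (e.g. http://dynamodb.us-east-1.amazonaws.com)."""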
ddbep = 'http://dynamodb.{0}.amazonaws.com'.format(self.region)
return ddbep
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the dynamodb instance.
Returns:
dict mapping string property key to value.
"""
return {
'aws_dynamodb_primarykey': FLAGS.aws_dynamodb_primarykey,
'aws_dynamodb_use_sort': FLAGS.aws_dynamodb_use_sort,
'aws_dynamodb_sortkey': FLAGS.aws_dynamodb_sortkey,
'aws_dynamodb_attributetype': FLAGS.aws_dynamodb_attributetype,
'aws_dynamodb_read_capacity': FLAGS.aws_dynamodb_read_capacity,
'aws_dynamodb_write_capacity': FLAGS.aws_dynamodb_write_capacity,
'aws_dynamodb_lsi_count': FLAGS.aws_dynamodb_lsi_count,
'aws_dynamodb_gsi_count': FLAGS.aws_dynamodb_gsi_count,
'aws_dynamodb_consistentReads': FLAGS.aws_dynamodb_ycsb_consistentReads,
'aws_dynamodb_connectMax': FLAGS.aws_dynamodb_connectMax,
}
def AddTagsToExistingInstance(table_name, region):
"""Add tags to an existing DynamoDB table."""
cmd = util.AWS_PREFIX + [
'dynamodb',
'describe-table',
'--table-name', table_name,
'--region', region
]
stdout, _, _ = vm_util.IssueCommand(cmd)
resource_arn = json.loads(stdout)['Table']['TableArn']
cmd = util.AWS_PREFIX + [
'dynamodb', 'tag-resource', '--resource-arn', resource_arn, '--region',
region, '--tags'
] + util.MakeFormattedDefaultTags()
vm_util.IssueCommand(cmd)
|
from xknx.devices import Light as XknxLight
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
LightEntity,
)
import homeassistant.util.color as color_util
from .const import DOMAIN
from .knx_entity import KnxEntity
DEFAULT_COLOR = (0.0, 0.0)
DEFAULT_BRIGHTNESS = 255
DEFAULT_WHITE_VALUE = 255
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up lights for KNX platform."""
entities = []
for device in hass.data[DOMAIN].xknx.devices:
if isinstance(device, XknxLight):
entities.append(KNXLight(device))
async_add_entities(entities)
class KNXLight(KnxEntity, LightEntity):
"""Representation of a KNX light."""
def __init__(self, device: XknxLight):
"""Initialize of KNX light."""
super().__init__(device)
self._min_kelvin = device.min_kelvin
self._max_kelvin = device.max_kelvin
self._min_mireds = color_util.color_temperature_kelvin_to_mired(
self._max_kelvin
)
self._max_mireds = color_util.color_temperature_kelvin_to_mired(
self._min_kelvin
)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self._device.supports_brightness:
return self._device.current_brightness
hsv_color = self._hsv_color
if self._device.supports_color and hsv_color:
return round(hsv_color[-1] / 100 * 255)
return None
@property
def hs_color(self):
"""Return the HS color value."""
rgb = None
if self._device.supports_rgbw or self._device.supports_color:
rgb, _ = self._device.current_color
return color_util.color_RGB_to_hs(*rgb) if rgb else None
@property
def _hsv_color(self):
"""Return the HSV color value."""
rgb = None
if self._device.supports_rgbw or self._device.supports_color:
rgb, _ = self._device.current_color
return color_util.color_RGB_to_hsv(*rgb) if rgb else None
@property
def white_value(self):
"""Return the white value."""
white = None
if self._device.supports_rgbw:
_, white = self._device.current_color
return white
@property
def color_temp(self):
"""Return the color temperature in mireds."""
if self._device.supports_color_temperature:
kelvin = self._device.current_color_temperature
if kelvin is not None:
return color_util.color_temperature_kelvin_to_mired(kelvin)
if self._device.supports_tunable_white:
relative_ct = self._device.current_tunable_white
if relative_ct is not None:
                # KNX devices typically work in Kelvin, so interpolate the
                # relative (0-255) tunable-white value onto the Kelvin range
                # before converting to mireds
return color_util.color_temperature_kelvin_to_mired(
self._min_kelvin
+ ((relative_ct / 255) * (self._max_kelvin - self._min_kelvin))
)
return None
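    # Worked example for the tunable-white branch above (illustrative numbers,
    # assuming min_kelvin=2700 and max_kelvin=6000): relative_ct=128 maps to
    # 2700 + (128 / 255) * 3300 ~= 4356 K, which is about 230 mireds.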
@property
def min_mireds(self):
"""Return the coldest color temp this light supports in mireds."""
return self._min_mireds
@property
def max_mireds(self):
"""Return the warmest color temp this light supports in mireds."""
return self._max_mireds
@property
def effect_list(self):
"""Return the list of supported effects."""
return None
@property
def effect(self):
"""Return the current effect."""
return None
@property
def is_on(self):
"""Return true if light is on."""
return self._device.state
@property
def supported_features(self):
"""Flag supported features."""
flags = 0
if self._device.supports_brightness:
flags |= SUPPORT_BRIGHTNESS
if self._device.supports_color:
flags |= SUPPORT_COLOR | SUPPORT_BRIGHTNESS
if self._device.supports_rgbw:
flags |= SUPPORT_COLOR | SUPPORT_WHITE_VALUE
if (
self._device.supports_color_temperature
or self._device.supports_tunable_white
):
flags |= SUPPORT_COLOR_TEMP
return flags
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
white_value = kwargs.get(ATTR_WHITE_VALUE, self.white_value)
mireds = kwargs.get(ATTR_COLOR_TEMP, self.color_temp)
update_brightness = ATTR_BRIGHTNESS in kwargs
update_color = ATTR_HS_COLOR in kwargs
update_white_value = ATTR_WHITE_VALUE in kwargs
update_color_temp = ATTR_COLOR_TEMP in kwargs
# avoid conflicting changes and weird effects
if not (
self.is_on
or update_brightness
or update_color
or update_white_value
or update_color_temp
):
await self._device.set_on()
if self._device.supports_brightness and (
update_brightness and not update_color
):
# if we don't need to update the color, try updating brightness
# directly if supported; don't do it if color also has to be
# changed, as RGB color implicitly sets the brightness as well
await self._device.set_brightness(brightness)
elif (self._device.supports_rgbw or self._device.supports_color) and (
update_brightness or update_color or update_white_value
):
# change RGB color, white value (if supported), and brightness
# if brightness or hs_color was not yet set use the default value
# to calculate RGB from as a fallback
if brightness is None:
brightness = DEFAULT_BRIGHTNESS
if hs_color is None:
hs_color = DEFAULT_COLOR
if white_value is None and self._device.supports_rgbw:
white_value = DEFAULT_WHITE_VALUE
rgb = color_util.color_hsv_to_RGB(*hs_color, brightness * 100 / 255)
await self._device.set_color(rgb, white_value)
if update_color_temp:
kelvin = int(color_util.color_temperature_mired_to_kelvin(mireds))
kelvin = min(self._max_kelvin, max(self._min_kelvin, kelvin))
if self._device.supports_color_temperature:
await self._device.set_color_temperature(kelvin)
elif self._device.supports_tunable_white:
relative_ct = int(
255
* (kelvin - self._min_kelvin)
/ (self._max_kelvin - self._min_kelvin)
)
await self._device.set_tunable_white(relative_ct)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._device.set_off()
|
import numpy as np
from ..epochs import BaseEpochs
from ..io.pick import _picks_to_idx
from ..io.base import BaseRaw
from ..utils import _check_preload, _validate_type, _check_option, verbose
@verbose
def regress_artifact(inst, picks=None, picks_artifact='eog', betas=None,
copy=True, verbose=None):
"""Regress artifacts using reference channels.
Parameters
----------
inst : instance of Epochs | Raw
The instance to process.
%(picks_good_data)s
picks_artifact : array-like | str
Channel picks to use as predictor/explanatory variables capturing
the artifact of interest (default is "eog").
betas : ndarray, shape (n_picks, n_picks_ref) | None
The regression coefficients to use. If None (default), they will be
estimated from the data.
copy : bool
If True (default), copy the instance before modifying it.
%(verbose)s
Returns
-------
inst : instance of Epochs | Raw
The processed data.
betas : ndarray, shape (n_picks, n_picks_ref)
The betas used during regression.
Notes
-----
To implement the method outlined in :footcite:`GrattonEtAl1983`,
remove the evoked response from epochs before estimating the
regression coefficients, then apply those regression coefficients to the
original data in two calls like (here for a single-condition ``epochs``
only):
>>> epochs_no_ave = epochs.copy().subtract_evoked() # doctest:+SKIP
    >>> _, betas = mne.preprocessing.regress_artifact(epochs_no_ave) # doctest:+SKIP
    >>> epochs_clean, _ = mne.preprocessing.regress_artifact(epochs, betas=betas) # doctest:+SKIP
References
----------
.. footbibliography::
""" # noqa: E501
_check_preload(inst, 'regress')
_validate_type(inst, (BaseEpochs, BaseRaw), 'inst', 'Epochs or Raw')
picks = _picks_to_idx(inst.info, picks, none='data')
picks_artifact = _picks_to_idx(inst.info, picks_artifact)
if np.in1d(picks_artifact, picks).any():
raise ValueError('picks_artifact cannot be contained in picks')
inst = inst.copy() if copy else inst
artifact_data = inst._data[..., picks_artifact, :]
ref_data = artifact_data - np.mean(artifact_data, -1, keepdims=True)
if ref_data.ndim == 3:
ref_data = ref_data.transpose(1, 0, 2).reshape(len(picks_artifact), -1)
cov = np.dot(ref_data, ref_data.T)
# process each one separately to reduce memory load
betas_shape = (len(picks), len(picks_artifact))
if betas is None:
betas = np.empty(betas_shape)
estimate = True
else:
estimate = False
betas = np.asarray(betas, dtype=float)
_check_option('betas.shape', betas.shape, (betas_shape,))
for pi, pick in enumerate(picks):
this_data = inst._data[..., pick, :] # view
orig_shape = this_data.shape
if estimate:
# subtract mean over time from every trial/channel
cov_data = this_data - np.mean(this_data, -1, keepdims=True)
cov_data = cov_data.reshape(1, -1)
betas[pi] = np.linalg.solve(cov, np.dot(ref_data, cov_data.T)).T[0]
# subtract weighted (demeaned) eye channels from channel
this_data -= (betas[pi] @ ref_data).reshape(orig_shape)
return inst, betas
|
from homeassistant.components.lock import SUPPORT_OPEN, LockEntity
from tests.common import MockEntity
ENTITIES = {}
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
{}
if empty
else {
"support_open": MockLock(
name="Support open Lock",
is_locked=True,
supported_features=SUPPORT_OPEN,
unique_id="unique_support_open",
),
"no_support_open": MockLock(
name="No support open Lock",
is_locked=True,
supported_features=0,
unique_id="unique_no_support_open",
),
}
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(list(ENTITIES.values()))
class MockLock(MockEntity, LockEntity):
"""Mock Lock class."""
@property
def is_locked(self):
"""Return true if the lock is locked."""
return self._handle("is_locked")
@property
def supported_features(self):
"""Return the class of this sensor."""
return self._handle("supported_features")
|
import nltk
from matchzoo.utils.bert_utils import is_chinese_char, \
whitespace_tokenize, run_split_on_punc
from .unit import Unit
class Tokenize(Unit):
"""Process unit for text tokenization."""
def transform(self, input_: str) -> list:
"""
Process input data from raw terms to list of tokens.
:param input_: raw textual input.
:return tokens: tokenized tokens as a list.
"""
return nltk.word_tokenize(input_)
class ChineseTokenize(Unit):
"""Process unit for text containing Chinese tokens."""
def transform(self, input_: str) -> str:
"""
Process input data from raw terms to processed text.
:param input_: raw textual input.
:return output: text with at least one blank between adjacent
Chinese tokens.
"""
output = []
for char in input_:
cp = ord(char)
if is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
class BasicTokenize(Unit):
"""Process unit for text tokenization."""
def transform(self, input_: str) -> list:
"""
Process input data from raw terms to list of tokens.
:param input_: raw textual input.
:return tokens: tokenized tokens as a list.
"""
orig_tokens = whitespace_tokenize(input_)
split_tokens = []
for token in orig_tokens:
split_tokens.extend(run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
class WordPieceTokenize(Unit):
"""Process unit for text tokenization."""
def __init__(self, vocab: dict, max_input_chars_per_word: int = 200):
"""Initialization."""
self.vocab = vocab
self.unk_token = '[UNK]'
self.max_input_chars_per_word = max_input_chars_per_word
def transform(self, input_: list) -> list:
"""
Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
For example:
>>> input_list = ["unaffable"]
>>> vocab = {"un": 0, "##aff": 1, "##able":2}
>>> wordpiece_unit = WordPieceTokenize(vocab)
>>> output = wordpiece_unit.transform(input_list)
>>> golden_output = ["un", "##aff", "##able"]
>>> assert output == golden_output
:param input_: token list.
:return tokens: A list of wordpiece tokens.
"""
output_tokens = []
for token in input_:
chars = list(token)
token_length = len(chars)
if token_length > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
unknown_suffix = False
start = 0
sub_tokens = []
while start < token_length:
end = token_length
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
unknown_suffix = True
break
sub_tokens.append(cur_substr)
start = end
if unknown_suffix:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
|
import logging
from pycomfoconnect import Bridge, ComfoConnect
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PIN,
CONF_TOKEN,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
_LOGGER = logging.getLogger(__name__)
DOMAIN = "comfoconnect"
SIGNAL_COMFOCONNECT_UPDATE_RECEIVED = "comfoconnect_update_received_{}"
CONF_USER_AGENT = "user_agent"
DEFAULT_NAME = "ComfoAirQ"
DEFAULT_PIN = 0
DEFAULT_TOKEN = "00000000000000000000000000000001"
DEFAULT_USER_AGENT = "Home Assistant"
DEVICE = None
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TOKEN, default=DEFAULT_TOKEN): vol.Length(
min=32, max=32, msg="invalid token"
),
vol.Optional(CONF_USER_AGENT, default=DEFAULT_USER_AGENT): cv.string,
vol.Optional(CONF_PIN, default=DEFAULT_PIN): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
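# Example configuration.yaml entry accepted by the schema above (host and
# token are placeholders):
#
#     comfoconnect:
#       host: 192.168.1.10
#       token: 00000000000000000000000000000001
#       pin: 0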
def setup(hass, config):
"""Set up the ComfoConnect bridge."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
name = conf[CONF_NAME]
token = conf[CONF_TOKEN]
user_agent = conf[CONF_USER_AGENT]
pin = conf[CONF_PIN]
# Run discovery on the configured ip
bridges = Bridge.discover(host)
if not bridges:
_LOGGER.error("Could not connect to ComfoConnect bridge on %s", host)
return False
bridge = bridges[0]
_LOGGER.info("Bridge found: %s (%s)", bridge.uuid.hex(), bridge.host)
# Setup ComfoConnect Bridge
ccb = ComfoConnectBridge(hass, bridge, name, token, user_agent, pin)
hass.data[DOMAIN] = ccb
# Start connection with bridge
ccb.connect()
# Schedule disconnect on shutdown
def _shutdown(_event):
ccb.disconnect()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
# Load platforms
discovery.load_platform(hass, "fan", DOMAIN, {}, config)
return True
class ComfoConnectBridge:
"""Representation of a ComfoConnect bridge."""
def __init__(self, hass, bridge, name, token, friendly_name, pin):
"""Initialize the ComfoConnect bridge."""
self.data = {}
self.name = name
self.hass = hass
self.unique_id = bridge.uuid.hex()
self.comfoconnect = ComfoConnect(
bridge=bridge,
local_uuid=bytes.fromhex(token),
local_devicename=friendly_name,
pin=pin,
)
self.comfoconnect.callback_sensor = self.sensor_callback
def connect(self):
"""Connect with the bridge."""
_LOGGER.debug("Connecting with bridge")
self.comfoconnect.connect(True)
def disconnect(self):
"""Disconnect from the bridge."""
_LOGGER.debug("Disconnecting from bridge")
self.comfoconnect.disconnect()
def sensor_callback(self, var, value):
"""Notify listeners that we have received an update."""
_LOGGER.debug("Received update for %s: %s", var, value)
dispatcher_send(
self.hass, SIGNAL_COMFOCONNECT_UPDATE_RECEIVED.format(var), value
)
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import ATTR_VALUE, DOMAIN, SERVICE_SET_VALUE
_LOGGER = logging.getLogger(__name__)
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
try:
float(state.state)
except ValueError:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
service = SERVICE_SET_VALUE
service_data = {ATTR_ENTITY_ID: state.entity_id, ATTR_VALUE: state.state}
try:
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
except vol.Invalid as err:
# If value out of range.
_LOGGER.warning("Unable to reproduce state for %s: %s", state.entity_id, err)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Input number states."""
# Reproduce states in parallel.
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
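# Minimal usage sketch (entity id and value are illustrative; assumes a running
# Home Assistant instance with that input_number entity):
#
#     await async_reproduce_states(hass, [State("input_number.target", "42.0")])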
|
import logging
import service_configuration_lib
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import prompt_pick_one
log = logging.getLogger(__name__)
def load_adhoc_job_config(
service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR
):
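    """Load the adhoc instance config for a service, merged over its general
    config, optionally attaching the branch dict from deployments.json."""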
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service=service,
instance=instance,
instance_type="adhoc",
cluster=cluster,
soa_dir=soa_dir,
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = AdhocJobConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return AdhocJobConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
class AdhocJobConfig(LongRunningServiceConfig):
config_filename_prefix = "adhoc"
def __init__(
self,
service: str,
instance: str,
cluster: str,
config_dict: LongRunningServiceConfigDict,
branch_dict: BranchDictV2,
soa_dir: str = DEFAULT_SOA_DIR,
) -> None:
super().__init__(
cluster=cluster,
instance=instance,
service=service,
config_dict=config_dict,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
def get_default_interactive_config(
service: str, cluster: str, soa_dir: str, load_deployments: bool = False
) -> AdhocJobConfig:
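    """Return an AdhocJobConfig for the special "interactive" instance,
    falling back to defaults when no config or deployments are available."""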
default_job_config = {"cpus": 4, "mem": 10240, "disk": 1024}
try:
job_config = load_adhoc_job_config(
service=service, instance="interactive", cluster=cluster, soa_dir=soa_dir
)
except NoConfigurationForServiceError:
job_config = AdhocJobConfig(
service=service,
instance="interactive",
cluster=cluster,
config_dict={},
branch_dict=None,
soa_dir=soa_dir,
)
except NoDeploymentsAvailable:
job_config = load_adhoc_job_config(
service=service,
instance="interactive",
cluster=cluster,
soa_dir=soa_dir,
load_deployments=False,
)
if not job_config.branch_dict and load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
deploy_group = prompt_pick_one(
deployments_json.get_deploy_groups(), choosing="deploy group"
)
job_config.config_dict["deploy_group"] = deploy_group
job_config.branch_dict = {
"docker_image": deployments_json.get_docker_image_for_deploy_group(
deploy_group
),
"git_sha": deployments_json.get_git_sha_for_deploy_group(deploy_group),
"force_bounce": None,
"desired_state": "start",
}
for key, value in default_job_config.items():
job_config.config_dict.setdefault(key, value)
return job_config
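# Illustrative usage (service and cluster names are assumptions; reads
# soa-configs from soa_dir):
#
#     job_config = get_default_interactive_config(
#         service="example_service", cluster="example_cluster", soa_dir=DEFAULT_SOA_DIR
#     )
#     job_config.config_dict["cpus"]  # 4 unless overridden by an "interactive" config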
|
from hatasmota.light import (
LIGHT_TYPE_COLDWARM,
LIGHT_TYPE_NONE,
LIGHT_TYPE_RGB,
LIGHT_TYPE_RGBCW,
LIGHT_TYPE_RGBW,
)
from homeassistant.components import light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
import homeassistant.util.color as color_util
from .const import DATA_REMOVE_DISCOVER_COMPONENT, DOMAIN as TASMOTA_DOMAIN
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
DEFAULT_BRIGHTNESS_MAX = 255
TASMOTA_BRIGHTNESS_MAX = 100
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Tasmota light dynamically through discovery."""
@callback
def async_discover(tasmota_entity, discovery_hash):
"""Discover and add a Tasmota light."""
async_add_entities(
[TasmotaLight(tasmota_entity=tasmota_entity, discovery_hash=discovery_hash)]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(light.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(light.DOMAIN, TASMOTA_DOMAIN),
async_discover,
)
class TasmotaLight(
TasmotaAvailability,
TasmotaDiscoveryUpdate,
LightEntity,
):
"""Representation of a Tasmota light."""
def __init__(self, **kwds):
"""Initialize Tasmota light."""
self._state = False
self._supported_features = 0
self._brightness = None
self._color_temp = None
self._effect = None
self._hs = None
self._white_value = None
self._flash_times = None
super().__init__(
discovery_update=self.discovery_update,
**kwds,
)
self._setup_from_entity()
async def discovery_update(self, update, write_state=True):
"""Handle updated discovery message."""
await super().discovery_update(update, write_state=False)
self._setup_from_entity()
self.async_write_ha_state()
def _setup_from_entity(self):
"""(Re)Setup the entity."""
supported_features = 0
light_type = self._tasmota_entity.light_type
if light_type != LIGHT_TYPE_NONE:
supported_features |= SUPPORT_BRIGHTNESS
supported_features |= SUPPORT_TRANSITION
if light_type in [LIGHT_TYPE_COLDWARM, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_COLOR_TEMP
if light_type in [LIGHT_TYPE_RGB, LIGHT_TYPE_RGBW, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_COLOR
supported_features |= SUPPORT_EFFECT
if light_type in [LIGHT_TYPE_RGBW, LIGHT_TYPE_RGBCW]:
supported_features |= SUPPORT_WHITE_VALUE
self._supported_features = supported_features
@callback
def state_updated(self, state, **kwargs):
"""Handle state updates."""
self._state = state
attributes = kwargs.get("attributes")
if attributes:
if "brightness" in attributes:
brightness = float(attributes["brightness"])
percent_bright = brightness / TASMOTA_BRIGHTNESS_MAX
self._brightness = percent_bright * 255
if "color" in attributes:
color = attributes["color"]
self._hs = color_util.color_RGB_to_hs(*color)
if "color_temp" in attributes:
self._color_temp = attributes["color_temp"]
if "effect" in attributes:
self._effect = attributes["effect"]
if "white_value" in attributes:
white_value = float(attributes["white_value"])
percent_white = white_value / TASMOTA_BRIGHTNESS_MAX
self._white_value = percent_white * 255
self.async_write_ha_state()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the color temperature in mired."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._tasmota_entity.min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._tasmota_entity.max_mireds
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._tasmota_entity.effect_list
@property
def hs_color(self):
"""Return the hs color value."""
return self._hs
@property
def white_value(self):
"""Return the white property."""
return self._white_value
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
supported_features = self._supported_features
attributes = {}
if ATTR_HS_COLOR in kwargs and supported_features & SUPPORT_COLOR:
hs_color = kwargs[ATTR_HS_COLOR]
attributes["color"] = {}
rgb = color_util.color_hsv_to_RGB(hs_color[0], hs_color[1], 100)
attributes["color"] = [rgb[0], rgb[1], rgb[2]]
if ATTR_TRANSITION in kwargs:
attributes["transition"] = kwargs[ATTR_TRANSITION]
if ATTR_BRIGHTNESS in kwargs and supported_features & SUPPORT_BRIGHTNESS:
brightness_normalized = kwargs[ATTR_BRIGHTNESS] / DEFAULT_BRIGHTNESS_MAX
device_brightness = min(
round(brightness_normalized * TASMOTA_BRIGHTNESS_MAX),
TASMOTA_BRIGHTNESS_MAX,
)
# Make sure the brightness is not rounded down to 0
device_brightness = max(device_brightness, 1)
attributes["brightness"] = device_brightness
if ATTR_COLOR_TEMP in kwargs and supported_features & SUPPORT_COLOR_TEMP:
attributes["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
if ATTR_EFFECT in kwargs:
attributes["effect"] = kwargs[ATTR_EFFECT]
if ATTR_WHITE_VALUE in kwargs:
white_value_normalized = kwargs[ATTR_WHITE_VALUE] / DEFAULT_BRIGHTNESS_MAX
device_white_value = min(
round(white_value_normalized * TASMOTA_BRIGHTNESS_MAX),
TASMOTA_BRIGHTNESS_MAX,
)
attributes["white_value"] = device_white_value
self._tasmota_entity.set_state(True, attributes)
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
attributes = {"state": "OFF"}
if ATTR_TRANSITION in kwargs:
attributes["transition"] = kwargs[ATTR_TRANSITION]
self._tasmota_entity.set_state(False, attributes)
|
import collections
import os
import pytest
from molecule import migrate
@pytest.fixture
def _instance():
molecule_file = os.path.join(
os.path.dirname(__file__), os.path.pardir, 'resources',
'molecule_v1_vagrant.yml')
return migrate.Migrate(molecule_file)
def test_get_v1_config(_instance):
data = _instance._get_v1_config()
assert isinstance(data, dict)
def test_v1_member(_instance):
assert isinstance(_instance._v1, dict)
def test_v2_member(_instance):
assert isinstance(_instance._v2, collections.OrderedDict)
def test_dump(_instance):
x = """
---
dependency:
name: galaxy
driver:
name: vagrant
provider:
name: virtualbox
lint:
name: yamllint
platforms:
- name: host.example.com
box: namespace/rhel-7
box_version: 7.2.0
box_url: http://example.com/pub/rhel-7.json
memory: 4096
cpus: 2
groups:
- group1
- group2
interfaces:
- auto_config: true
network_name: private_network
type: dhcp
raw_config_args:
- foo
- bar
provisioner:
name: ansible
env:
FOO: bar
options:
extra-vars: foo=bar
verbose: true
become: true
tags: foo,bar
lint:
name: ansible-lint
scenario:
name: default
verifier:
name: testinfra
options:
sudo: true
lint:
name: flake8
""".lstrip()
assert x == _instance.dump()
def test_convert(_instance, patched_logger_info):
x = {
'scenario': {
'name': 'default',
},
'platforms': [{
'box':
'namespace/rhel-7',
'box_version':
'7.2.0',
'name':
'host.example.com',
'interfaces': [{
'type': 'dhcp',
'network_name': 'private_network',
'auto_config': True,
}],
'cpus':
2,
'box_url':
'http://example.com/pub/rhel-7.json',
'groups': [
'group1',
'group2',
],
'memory':
4096,
'raw_config_args': [
'foo',
'bar',
],
}],
'lint': {
'name': 'yamllint',
},
'driver': {
'name': 'vagrant',
'provider': {
'name': 'virtualbox',
},
},
'dependency': {
'name': 'galaxy',
},
'verifier': {
'lint': {
'name': 'flake8',
},
'name': 'testinfra',
'options': {
'sudo': True,
}
},
'provisioner': {
'lint': {
'name': 'ansible-lint',
},
'name': 'ansible',
'env': {
'FOO': 'bar',
},
'options': {
'become': True,
'extra-vars': 'foo=bar',
'verbose': True,
'tags': 'foo,bar',
}
}
}
data = _instance._convert()
assert x == _instance._to_dict(data)
msg = 'Vagrant syle v1 config found'
patched_logger_info.assert_called_once_with(msg)
def test_convert_raises_on_invalid_migration_config(_instance,
patched_logger_critical):
del _instance._v1['vagrant']
with pytest.raises(SystemExit) as e:
_instance._convert()
assert 1 == e.value.code
msg = 'Vagrant migrations only supported. Exiting.'
patched_logger_critical.assert_called_once_with(msg)
def test_check_errors(_instance):
assert _instance._check_errors({}) is None
def test_check_errors_raises_on_errors(_instance, patched_logger_critical):
with pytest.raises(SystemExit) as e:
_instance._check_errors('unit test error')
assert 1 == e.value.code
msg = 'Failed to validate.\n\nunit test error'
patched_logger_critical.assert_called_once_with(msg)
|
from ..cog_utils import CompositeMetaClass
from .equalizer import EqualizerUtilities
from .formatting import FormattingUtilities
from .local_tracks import LocalTrackUtilities
from .miscellaneous import MiscellaneousUtilities
from .parsers import ParsingUtilities
from .player import PlayerUtilities
from .playlists import PlaylistUtilities
from .queue import QueueUtilities
from .validation import ValidationUtilities
class Utilities(
EqualizerUtilities,
FormattingUtilities,
LocalTrackUtilities,
MiscellaneousUtilities,
PlayerUtilities,
PlaylistUtilities,
QueueUtilities,
ValidationUtilities,
ParsingUtilities,
metaclass=CompositeMetaClass,
):
"""Class joining all utility subclasses"""
|