"""
A collection of distutils extensions adding things like automatic 2to3
translation, a test runner, and a workaround for distutils' broken CFLAGS
passing to stdlib extensions.
Specifically, this module is only meant to be imported in setup.py scripts.
"""
import copy
import errno
import inspect
import io
import math
import operator
import os
import re
import shlex
import shutil
import subprocess
import sys
import textwrap
os.environ["SNAKEOIL_DEMANDLOAD_PROTECTION"] = 'n'
os.environ["SNAKEOIL_DEMANDLOAD_WARN"] = 'n'
from setuptools.command import install as dst_install
from distutils import log
from distutils.core import Command, Extension
from distutils.errors import DistutilsExecError
from distutils.command import (
sdist as dst_sdist, build_ext as dst_build_ext, build_py as dst_build_py,
build as dst_build, build_scripts as dst_build_scripts, config as dst_config)
# getting built by readthedocs
READTHEDOCS = os.environ.get('READTHEDOCS', None) == 'True'
# top level repo/tarball directory
TOPDIR = os.path.dirname(os.path.abspath(inspect.stack(0)[1][1]))
def find_project(topdir=TOPDIR):
"""Determine a project's name.
Based on the assumption that the project is only distributing one main
module.
"""
topdir_depth = len(topdir.split('/'))
modules = []
# look for a top-level module
for root, dirs, files in os.walk(topdir):
# only descend at most one level
if len(root.split('/')) > topdir_depth + 1:
continue
if '__init__.py' in files:
modules.append(os.path.basename(root))
if not modules:
raise ValueError('No project module found')
elif len(modules) > 1:
raise ValueError('Multiple project modules found: %s' % (', '.join(modules)))
return modules[0]
# determine the project we're being imported into
PROJECT = find_project()
def version(project=PROJECT):
"""Determine a project's version.
Based on the assumption that a project defines __version__ in its main
module.
"""
version = None
try:
with io.open(os.path.join(TOPDIR, project, '__init__.py'), encoding='utf-8') as f:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(), re.MULTILINE).group(1)
except IOError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
if version is None:
raise RuntimeError('Cannot find version for project: %s' % (project,))
return version
def readme(project=PROJECT):
"""Determine a project's long description."""
for doc in ('README.rst', 'README'):
try:
with io.open(os.path.join(TOPDIR, doc), encoding='utf-8') as f:
return f.read()
except IOError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
return None
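# In a consuming setup.py these helpers typically feed setup() directly; a
# minimal sketch (the cmdclass entries a project wires up will vary -- the
# classes named here are the wrappers defined later in this module):
#
#   from setuptools import setup
#   import pkgdist
#
#   setup(
#       name=pkgdist.PROJECT,
#       version=pkgdist.version(),
#       long_description=pkgdist.readme(),
#       cmdclass={'sdist': pkgdist.sdist, 'build_py': pkgdist.build_py},
#   )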
def get_file_paths(path):
"""Get list of all file paths under a given path."""
for root, dirs, files in os.walk(path):
for f in files:
yield os.path.join(root, f)[len(path):].lstrip('/')
def data_mapping(host_prefix, path, skip=None):
"""Map repo paths to host paths for installed data files."""
skip = list(skip) if skip is not None else []
for root, dirs, files in os.walk(path):
host_path = os.path.join(host_prefix, root.partition(path)[2].lstrip('/'))
repo_path = os.path.join(path, root.partition(path)[2].lstrip('/'))
if repo_path not in skip:
yield (host_path, [os.path.join(root, x) for x in files
if os.path.join(root, x) not in skip])
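# A short sketch of typical usage (paths are hypothetical): mapping a repo's
# "examples" directory onto the host's doc tree for distutils' data_files,
# which expects exactly the (host_dir, [files]) tuples yielded above:
#
#   data_files = list(data_mapping('share/doc/foo/examples', 'examples'))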
def pkg_config(*packages, **kw):
"""Translate pkg-config data to compatible Extension parameters.
Example usage:
>>> from distutils.extension import Extension
>>> from pkgdist import pkg_config
>>>
>>> ext_kwargs = dict(
... include_dirs=['include'],
... extra_compile_args=['-std=c++11'],
... )
>>> extensions = [
... Extension('foo', ['foo.c']),
... Extension('bar', ['bar.c'], **pkg_config('lcms2')),
... Extension('ext', ['ext.cpp'], **pkg_config(('nss', 'libusb-1.0'), **ext_kwargs)),
... ]
"""
flag_map = {
'-I': 'include_dirs',
'-L': 'library_dirs',
'-l': 'libraries',
}
try:
tokens = subprocess.check_output(
['pkg-config', '--libs', '--cflags'] + list(packages)).split()
except OSError as e:
sys.stderr.write('running pkg-config failed: {}\n'.format(e.strerror))
sys.exit(1)
for token in tokens:
token = token.decode()
if token[:2] in flag_map:
kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])
else:
kw.setdefault('extra_compile_args', []).append(token)
return kw
class OptionalExtension(Extension):
"""Python extension that is optional to build.
If the extension is merely preferable rather than required, use this
class instead of :py:class:`Extension`, since the machinery in this
module relies on isinstance() to identify what absolutely must be
built vs what would be nice to have built.
"""
pass
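# A minimal sketch (module and source names are hypothetical) of marking an
# extension as nice-to-have in a setup.py; the build_ext wrapper below drops
# OptionalExtension instances when optional builds are disabled:
#
#   ext_modules = [
#       Extension('foo._core', ['src/core.c']),                  # must build
#       OptionalExtension('foo._speedups', ['src/speedups.c']),  # best effort
#   ]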
class sdist(dst_sdist.sdist):
"""sdist command wrapper to bundle generated files for release."""
package_namespace = PROJECT
def initialize_options(self):
dst_sdist.sdist.initialize_options(self)
def generate_verinfo(self, base_dir):
"""Generate project version module.
This is used by the --version option in interactive programs among
other things.
"""
from snakeoil.version import get_git_version
log.info('generating _verinfo')
data = get_git_version(base_dir)
if not data:
return
path = os.path.join(base_dir, self.package_namespace, '_verinfo.py')
with open(path, 'w') as f:
f.write('version_info=%r' % (data,))
def make_release_tree(self, base_dir, files):
"""Create and populate the directory tree that is put in source tars.
This copies or hardlinks "normal" source files that should go
into the release and adds generated files that should not
exist in a working tree.
"""
if 'build_man' in self.distribution.cmdclass:
build_man = self.reinitialize_command('build_man')
build_man.ensure_finalized()
self.run_command('build_man')
shutil.copytree(os.path.join(os.getcwd(), build_man.content_search_path[0]),
os.path.join(base_dir, build_man.content_search_path[1]))
dst_sdist.sdist.make_release_tree(self, base_dir, files)
self.generate_verinfo(base_dir)
def run(self):
build_ext = self.reinitialize_command('build_ext')
build_ext.ensure_finalized()
# generate cython extensions if any exist
cython = any(
os.path.splitext(f)[1] == '.pyx' for e in
build_ext.extensions for f in e.sources)
if cython:
from Cython.Build import cythonize
cythonize(build_ext.extensions)
dst_sdist.sdist.run(self)
class build_py(dst_build_py.build_py):
"""build_py command wrapper."""
user_options = dst_build_py.build_py.user_options + \
[("inplace", "i", "do any source conversions in place")]
package_namespace = PROJECT
generate_verinfo = True
def initialize_options(self):
dst_build_py.build_py.initialize_options(self)
self.inplace = False
def finalize_options(self):
self.inplace = bool(self.inplace)
if self.inplace:
self.build_lib = '.'
dst_build_py.build_py.finalize_options(self)
def _run_generate_verinfo(self, rebuilds=None):
ver_path = self.get_module_outfile(
self.build_lib, (self.package_namespace,), '_verinfo')
# this should check mtime...
if not os.path.exists(ver_path):
from snakeoil.version import get_git_version
log.info('generating _verinfo')
with open(ver_path, 'w') as f:
f.write("version_info=%r" % (get_git_version('.'),))
self.byte_compile([ver_path])
if rebuilds is not None:
rebuilds.append((ver_path, os.lstat(ver_path).st_mtime))
def run(self):
dst_build_py.build_py.run(self)
if self.generate_verinfo:
self._run_generate_verinfo()
class build_py2to3(build_py):
"""build_py command wrapper that runs 2to3 for py3 targets."""
def _compute_rebuilds(self, force=False):
for base, mod_name, path in self.find_all_modules():
try:
new_mtime = math.floor(os.lstat(path).st_mtime)
except EnvironmentError:
# ok... wtf distutils?
continue
trg_path = os.path.join(self.build_lib, path)
if force:
yield trg_path, new_mtime
continue
try:
old_mtime = math.floor(os.lstat(trg_path).st_mtime)
except EnvironmentError:
yield trg_path, new_mtime
continue
if old_mtime != new_mtime:
yield trg_path, new_mtime
def _inner_run(self, rebuilds):
pass
def get_py2to3_converter(self, options=None, proc_count=0):
from lib2to3 import refactor as ref_mod
from snakeoil.dist import caching_2to3
if proc_count == 0:
import multiprocessing
proc_count = multiprocessing.cpu_count()
assert proc_count >= 1
if proc_count > 1 and not caching_2to3.multiprocessing_available:
proc_count = 1
refactor_kls = caching_2to3.MultiprocessRefactoringTool
fixer_names = ref_mod.get_fixers_from_package('lib2to3.fixes')
f = refactor_kls(fixer_names, options=options).refactor
def f2(*args, **kwds):
if caching_2to3.multiprocessing_available:
kwds['num_processes'] = proc_count
return f(*args, **kwds)
return f2
def run(self):
py3k_rebuilds = []
if not self.inplace:
if is_py3k:
py3k_rebuilds = list(self._compute_rebuilds(self.force))
dst_build_py.build_py.run(self)
if self.generate_verinfo:
self._run_generate_verinfo(py3k_rebuilds)
self._inner_run(py3k_rebuilds)
if not is_py3k:
return
converter = self.get_py2to3_converter()
log.info("starting 2to3 conversion; this may take a while...")
converter([x[0] for x in py3k_rebuilds], write=True)
for path, mtime in py3k_rebuilds:
os.utime(path, (-1, mtime))
log.info("completed py3k conversions")
class build_py3to2(build_py2to3):
"""build_py command wrapper that runs 3to2 for py2 targets."""
def run(self):
py2k_rebuilds = []
if not self.inplace:
if not is_py3k:
py2k_rebuilds = list(self._compute_rebuilds(self.force))
dst_build_py.build_py.run(self)
if self.generate_verinfo:
self._run_generate_verinfo(py2k_rebuilds)
self._inner_run(py2k_rebuilds)
if is_py3k:
return
from lib3to2.build import run_3to2
from lib2to3 import refactor
# assume a few fixes are already handled in the code or aren't needed
# for py27
skip_list = (
'lib3to2.fixes.fix_str', 'lib3to2.fixes.fix_printfunction',
'lib3to2.fixes.fix_except', 'lib3to2.fixes.fix_with',
)
fixer_names = [x for x in refactor.get_fixers_from_package('lib3to2.fixes')
if x not in skip_list]
log.info("starting 3to2 conversion; this may take a while...")
run_3to2([x[0] for x in py2k_rebuilds], fixer_names=fixer_names)
for path, mtime in py2k_rebuilds:
os.utime(path, (-1, mtime))
log.info("completed py2k conversions")
class build_man(Command):
"""Build man pages.
Override the module search path before running sphinx. This fixes
generating man pages for scripts that need to import modules generated via
2to3 or other conversions instead of straight from the build directory.
"""
user_options = [
("force", "f", "force build as needed"),
]
content_search_path = ('build/sphinx/man', 'man')
def initialize_options(self):
self.force = False
def finalize_options(self):
self.force = bool(self.force)
def skip(self):
# don't rebuild if one of the output dirs exists
if any(os.path.exists(x) for x in self.content_search_path):
log.info('%s: docs already built, skipping regeneration...' %
(self.__class__.__name__,))
return True
return False
def run(self):
if self.force or not self.skip():
# Use a built version for the man page generation process that
# imports script modules.
build_py = self.reinitialize_command('build_py')
build_py.ensure_finalized()
self.run_command('build_py')
syspath = sys.path[:]
sys.path.insert(0, os.path.abspath(build_py.build_lib))
# generate man page content for scripts we create
if 'build_scripts' in self.distribution.cmdclass:
from snakeoil.dist.generate_docs import generate_man
generate_man(PROJECT, TOPDIR)
# generate man pages
build_sphinx = self.reinitialize_command('build_sphinx')
build_sphinx.builder = 'man'
build_sphinx.ensure_finalized()
self.run_command('build_sphinx')
sys.path = syspath
class build_docs(build_man):
"""Build html docs."""
user_options = [
("force", "f", "force build as needed"),
]
content_search_path = ('build/sphinx/html', 'html')
def initialize_options(self):
self.force = False
def finalize_options(self):
self.force = bool(self.force)
def run(self):
if self.force or not self.skip():
# generate man pages -- html versions of man pages are provided
self.run_command('build_man')
# generate API docs
from snakeoil.dist.generate_docs import generate_html
generate_html(PROJECT, TOPDIR)
# generate html docs -- allow build_sphinx cmd to run again
build_sphinx = self.reinitialize_command('build_sphinx')
build_sphinx.builder = 'html'
build_sphinx.ensure_finalized()
self.run_command('build_sphinx')
class build_ext(dst_build_ext.build_ext):
user_options = dst_build_ext.build_ext.user_options + [
("build-optional=", "o", "build optional C modules"),
("disable-distutils-flag-fixing", None,
"disable fixing of issue 969718 in python, adding missing -fno-strict-aliasing"),
]
boolean_options = dst_build.build.boolean_options + ["build-optional"]
def initialize_options(self):
dst_build_ext.build_ext.initialize_options(self)
self.build_optional = None
self.disable_distutils_flag_fixing = False
self.default_header_install_dir = None
def finalize_options(self):
dst_build_ext.build_ext.finalize_options(self)
if self.build_optional is None and not READTHEDOCS:
self.build_optional = True
self.build_optional = bool(self.build_optional)
if not self.build_optional:
self.extensions = [ext for ext in self.extensions if not isinstance(ext, OptionalExtension)]
# add header install dir to the search path
# (fixes virtualenv builds for consumer extensions)
self.set_undefined_options(
'install',
('install_headers', 'default_header_install_dir'))
if self.default_header_install_dir:
self.default_header_install_dir = os.path.dirname(self.default_header_install_dir)
for e in self.extensions:
# include_dirs may actually be shared between multiple extensions
if self.default_header_install_dir not in e.include_dirs:
e.include_dirs.append(self.default_header_install_dir)
@staticmethod
def determine_ext_lang(ext_path):
"""Determine file extensions for generated cython extensions."""
with open(ext_path) as f:
for line in f:
line = line.lstrip()
if not line:
continue
elif line[0] != '#':
return None
line = line[1:].lstrip()
if line[:10] == 'distutils:':
key, _, value = [s.strip() for s in line[10:].partition('=')]
if key == 'language':
return value
else:
return None
def no_cythonize(self):
"""Determine file paths for generated cython extensions."""
extensions = copy.deepcopy(self.extensions)
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
lang = build_ext.determine_ext_lang(sfile)
if lang == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources[:] = sources
return extensions
def run(self):
# ensure that the platform checks were performed
self.run_command('config')
# only regenerate cython extensions if requested or required
use_cython = (
os.environ.get('USE_CYTHON', False) or
any(not os.path.exists(x) for ext in self.no_cythonize() for x in ext.sources))
if use_cython:
from Cython.Build import cythonize
cythonize(self.extensions)
self.extensions = self.no_cythonize()
return dst_build_ext.build_ext.run(self)
def build_extensions(self):
# say it with me kids... distutils sucks!
for x in ("compiler_so", "compiler", "compiler_cxx"):
if self.debug:
l = [y for y in getattr(self.compiler, x) if y != '-DNDEBUG']
l.append('-Wall')
setattr(self.compiler, x, l)
if not self.disable_distutils_flag_fixing:
val = getattr(self.compiler, x)
if "-fno-strict-aliasing" not in val:
val.append("-fno-strict-aliasing")
if getattr(self.distribution, 'check_defines', None):
val = getattr(self.compiler, x)
for d, result in self.distribution.check_defines.items():
if result:
val.append('-D%s=1' % d)
else:
val.append('-U%s' % d)
return dst_build_ext.build_ext.build_extensions(self)
class build_scripts(dst_build_scripts.build_scripts):
"""Create and build (copy and modify #! line) the wrapper scripts."""
def finalize_options(self):
dst_build_scripts.build_scripts.finalize_options(self)
script_dir = os.path.join(
os.path.dirname(self.build_dir), '.generated_scripts')
self.mkpath(script_dir)
self.scripts = [os.path.join(script_dir, x) for x in os.listdir('bin')]
def run(self):
for script in self.scripts:
with open(script, 'w') as f:
f.write(textwrap.dedent("""\
#!%s
from os.path import basename
from %s import scripts
scripts.main(basename(__file__))
""" % (sys.executable, PROJECT)))
self.copy_scripts()
class build(dst_build.build):
"""Generic build command."""
user_options = dst_build.build.user_options[:]
user_options.append(('enable-man-pages', None, 'build man pages'))
user_options.append(('enable-html-docs', None, 'build html docs'))
boolean_options = dst_build.build.boolean_options[:]
boolean_options.extend(['enable-man-pages', 'enable-html-docs'])
sub_commands = dst_build.build.sub_commands[:]
sub_commands.append(('build_ext', None))
sub_commands.append(('build_py', None))
sub_commands.append(('build_scripts', None))
sub_commands.append(('build_docs', operator.attrgetter('enable_html_docs')))
sub_commands.append(('build_man', operator.attrgetter('enable_man_pages')))
def initialize_options(self):
dst_build.build.initialize_options(self)
self.enable_man_pages = False
self.enable_html_docs = False
def finalize_options(self):
dst_build.build.finalize_options(self)
if self.enable_man_pages is None:
path = os.path.dirname(os.path.abspath(__file__))
self.enable_man_pages = not os.path.exists(os.path.join(path, 'man'))
if self.enable_html_docs is None:
self.enable_html_docs = False
class install_docs(Command):
"""Install html documentation."""
content_search_path = build_docs.content_search_path
user_options = [
('path=', None, "final path to install to; else it's calculated"),
('build-dir=', None, "build directory"),
]
build_command = 'build_docs'
def initialize_options(self):
self.root = None
self.prefix = None
self.path = None
self.build_dir = None
self.content = []
self.source_path = None
def finalize_options(self):
self.set_undefined_options(
'install',
('root', 'root'),
('install_base', 'prefix'),
)
if not self.root:
self.root = '/'
if self.path is None:
self.path = os.path.join(
self.root, self.calculate_install_path().lstrip(os.path.sep))
def calculate_install_path(self):
return os.path.join(
os.path.abspath(self.prefix), 'share', 'doc', PROJECT + '-%s' % version(), 'html')
def find_content(self):
for possible_path in self.content_search_path:
if self.build_dir is not None:
possible_path = os.path.join(self.build_dir, possible_path)
possible_path = os.path.join(TOPDIR, possible_path)
if os.path.isdir(possible_path):
return possible_path
else:
return None
def _map_paths(self, content):
return {x: x for x in content}
def scan_content(self):
self.content = self._map_paths(get_file_paths(self.source_path))
return self.content
def run(self, firstrun=True):
self.source_path = self.find_content()
if self.source_path is None:
if not firstrun:
raise DistutilsExecError(
"no pregenerated sphinx content, and sphinx isn't available "
"to generate it; bailing")
self.run_command(self.build_command)
return self.run(False)
content = self.scan_content()
content = self.content
directories = set(map(os.path.dirname, content.values()))
directories.discard('')
for x in sorted(directories):
self.mkpath(os.path.join(self.path, x))
for src, dst in sorted(content.items()):
self.copy_file(
os.path.join(self.source_path, src),
os.path.join(self.path, dst))
def get_inputs(self):
# Py3k compatibility- force list so behaviour is the same.
return list(self.content)
def get_outputs(self):
# Py3k compatibility- force list so behaviour is the same.
return list(self.content.values())
class install_man(install_docs):
"""Install man pages."""
content_search_path = build_man.content_search_path
build_command = 'build_man'
def calculate_install_path(self):
return os.path.join(self.prefix, 'share', 'man')
def _map_paths(self, content):
d = {}
for x in content:
if len(x) >= 3 and x[-2] == '.' and x[-1].isdigit():
# Only consider extensions .1, .2, .3, etc, and files that
# have at least a single char beyond the extension (thus ignore
# .1, but allow a.1).
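# For example, a generated 'prog.1' maps to 'man1/prog.1' ('prog' being
# a hypothetical script name), while a bare '.1' is skipped.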
d[x] = 'man%s/%s' % (x[-1], os.path.basename(x))
return d
class install(dst_install.install):
"""Generic install command."""
user_options = dst_install.install.user_options[:]
user_options.append(('enable-man-pages', None, 'install man pages'))
user_options.append(('enable-html-docs', None, 'install html docs'))
boolean_options = dst_install.install.boolean_options[:]
boolean_options.extend(['enable-man-pages', 'enable-html-docs'])
def initialize_options(self):
dst_install.install.initialize_options(self)
self.enable_man_pages = False
self.enable_html_docs = False
def finalize_options(self):
build_options = self.distribution.command_options.setdefault('build', {})
build_options['enable_html_docs'] = ('command_line', self.enable_html_docs and 1 or 0)
man_pages = self.enable_man_pages
if man_pages and os.path.exists('man'):
man_pages = False
build_options['enable_man_pages'] = ('command_line', man_pages and 1 or 0)
dst_install.install.finalize_options(self)
sub_commands = dst_install.install.sub_commands[:]
sub_commands.append(('install_man', operator.attrgetter('enable_man_pages')))
sub_commands.append(('install_docs', operator.attrgetter('enable_html_docs')))
class test(Command):
"""Run our unit tests in a built copy.
Based on code from setuptools.
"""
blacklist = frozenset()
user_options = [
("inplace", "i", "do building/testing in place"),
("skip-rebuilding", "s", "skip rebuilds. primarily for development"),
("disable-fork", None, "disable forking of the testloader; primarily for debugging. "
"Automatically set in jython, disabled for cpython/unladen-swallow."),
("namespaces=", "t", "run only tests matching these namespaces. "
"comma delimited"),
("pure-python", None, "disable building of extensions. Enabled for jython, disabled elsewhere"),
("force", "f", "force build_py/build_ext as needed"),
("include-dirs=", "I", "include dirs for build_ext if needed"),
]
default_test_namespace = '%s.test' % PROJECT
def initialize_options(self):
self.inplace = False
self.disable_fork = is_jython
self.namespaces = ''
self.pure_python = is_jython
self.force = False
self.include_dirs = None
def finalize_options(self):
self.inplace = bool(self.inplace)
self.disable_fork = bool(self.disable_fork)
self.pure_python = bool(self.pure_python)
self.force = bool(self.force)
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.namespaces:
self.namespaces = tuple(set(self.namespaces.split(',')))
else:
self.namespaces = ()
def run(self):
from snakeoil.dist import unittest_extensions
build_ext = self.reinitialize_command('build_ext')
build_py = self.reinitialize_command('build_py')
build_ext.inplace = build_py.inplace = self.inplace
build_ext.force = build_py.force = self.force
build_ext.ensure_finalized()
build_py.ensure_finalized()
if self.include_dirs:
build_ext.include_dirs = self.include_dirs
if not self.pure_python:
self.run_command('build_ext')
if not self.inplace:
self.run_command('build_py')
syspath = sys.path[:]
mods_to_wipe = ()
if not self.inplace:
cwd = os.getcwd()
syspath = [x for x in sys.path if x != cwd]
test_path = os.path.abspath(build_py.build_lib)
syspath.insert(0, test_path)
mods = build_py.find_all_modules()
mods_to_wipe = set(x[0] for x in mods)
mods_to_wipe.update('.'.join(x[:2]) for x in mods)
namespaces = self.namespaces
if not self.namespaces:
namespaces = [self.default_test_namespace]
retval = unittest_extensions.run_tests(
namespaces, disable_fork=self.disable_fork,
blacklist=self.blacklist, pythonpath=syspath,
modules_to_wipe=mods_to_wipe)
# remove temporary plugincache so it isn't installed
plugincache = os.path.join(
os.path.abspath(build_py.build_lib), build_py.package_namespace,
'plugins/plugincache')
if os.path.exists(plugincache):
os.remove(plugincache)
if retval:
raise DistutilsExecError("tests failed; return %i" % (retval,))
class PyTest(Command):
"""Run tests using pytest against a built copy."""
user_options = [
('pytest-args=', 'a', 'arguments to pass to py.test'),
('coverage', 'c', 'generate coverage info'),
('report=', 'r', 'generate and/or show a coverage report'),
('jobs=', 'j', 'run X parallel tests at once'),
('match=', 'k', 'run only tests that match the provided expressions'),
]
default_test_dir = os.path.join(TOPDIR, PROJECT, 'test')
def initialize_options(self):
self.pytest_args = ''
self.coverage = False
self.match = None
self.jobs = None
self.report = None
def finalize_options(self):
self.test_args = [self.default_test_dir]
self.coverage = bool(self.coverage)
if self.match is not None:
self.test_args.extend(['-k', self.match])
if self.coverage:
try:
import pytest_cov
self.test_args.extend(['--cov', PROJECT])
except ImportError:
sys.stderr.write('error: install pytest-cov for coverage support\n')
sys.exit(1)
if self.report is None:
# disable coverage report output
self.test_args.extend(['--cov-report='])
else:
self.test_args.extend(['--cov-report', self.report])
if self.jobs is not None:
try:
import xdist
self.test_args.extend(['-n', self.jobs])
except ImportError:
sys.stderr.write('error: install pytest-xdist for -j/--jobs support\n')
sys.exit(1)
# add custom pytest args
self.test_args.extend(shlex.split(self.pytest_args))
def run(self):
try:
import pytest
except ImportError:
sys.stderr.write('error: pytest is not installed\n')
sys.exit(1)
# build extensions and byte-compile python
build_ext = self.reinitialize_command('build_ext')
build_py = self.reinitialize_command('build_py')
build_ext.ensure_finalized()
build_py.ensure_finalized()
self.run_command('build_ext')
self.run_command('build_py')
# Change the current working directory to the builddir during testing
# so coverage paths are correct.
builddir = os.path.abspath(build_py.build_lib)
if self.coverage and os.path.exists(os.path.join(TOPDIR, '.coveragerc')):
shutil.copyfile(os.path.join(TOPDIR, '.coveragerc'),
os.path.join(builddir, '.coveragerc'))
ret = subprocess.call([sys.executable, '-m', 'pytest'] + self.test_args, cwd=builddir)
sys.exit(ret)
def print_check(message, if_yes='found', if_no='not found'):
"""Decorator to print pre/post-check messages."""
def sub_decorator(f):
def sub_func(*args, **kwargs):
sys.stderr.write('-- %s\n' % (message,))
result = f(*args, **kwargs)
sys.stderr.write(
'-- %s -- %s\n' % (message, if_yes if result else if_no))
return result
sub_func.pkgdist_config_decorated = True
return sub_func
return sub_decorator
def cache_check(cache_key):
"""Method decorate to cache check result."""
def sub_decorator(f):
def sub_func(self, *args, **kwargs):
if cache_key in self.cache:
return self.cache[cache_key]
result = f(self, *args, **kwargs)
self.cache[cache_key] = result
return result
sub_func.pkgdist_config_decorated = True
return sub_func
return sub_decorator
def check_define(define_name):
"""Method decorator to store check result."""
def sub_decorator(f):
@cache_check(define_name)
def sub_func(self, *args, **kwargs):
result = f(self, *args, **kwargs)
self.check_defines[define_name] = result
return result
sub_func.pkgdist_config_decorated = True
return sub_func
return sub_decorator
class config(dst_config.config):
"""Perform platform checks for extension build."""
user_options = dst_config.config.user_options + [
("cache-path", "C", "path to read/write configuration cache"),
]
def initialize_options(self):
self.cache_path = None
self.build_base = None
dst_config.config.initialize_options(self)
def finalize_options(self):
if self.cache_path is None:
self.set_undefined_options(
'build',
('build_base', 'build_base'))
self.cache_path = os.path.join(self.build_base, 'config.cache')
dst_config.config.finalize_options(self)
def _cache_env_key(self):
return (self.cc, self.include_dirs, self.libraries, self.library_dirs)
@cache_check('_sanity_check')
@print_check('Performing basic C toolchain sanity check', 'works', 'broken')
def _sanity_check(self):
return self.try_link("int main(int argc, char *argv[]) { return 0; }")
def run(self):
from snakeoil.pickling import dump, load
# try to load the cached results
try:
with open(self.cache_path, 'rb') as f:
cache_db = load(f)
except (OSError, IOError):
cache_db = {}
else:
if self._cache_env_key() == cache_db.get('env_key'):
sys.stderr.write('-- Using cache from %s\n' % self.cache_path)
else:
sys.stderr.write('-- Build environment changed, discarding cache\n')
cache_db = {}
self.cache = cache_db.get('cache', {})
self.check_defines = {}
if not self._sanity_check():
sys.stderr.write('The C toolchain is unable to compile & link a simple C program!\n')
sys.exit(1)
# run all decorated methods
for k in dir(self):
if k.startswith('_'):
continue
if hasattr(getattr(self, k), 'pkgdist_config_decorated'):
getattr(self, k)()
# store results in Distribution instance
self.distribution.check_defines = self.check_defines
# store updated cache
cache_db = {
'cache': self.cache,
'env_key': self._cache_env_key(),
}
self.mkpath(os.path.dirname(self.cache_path))
with open(self.cache_path, 'wb') as f:
dump(cache_db, f)
# == methods for custom checks ==
def check_struct_member(self, typename, member, headers=None, include_dirs=None, lang="c"):
"""Check whether typename (must be struct or union) has the named member."""
return self.try_compile(
'int main() { %s x; (void) x.%s; return 0; }'
% (typename, member), headers, include_dirs, lang)
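# A minimal sketch (the define name, struct member, and header are
# hypothetical) of a project-specific check layered on the decorators above;
# decorated methods are run automatically by config.run(), and results
# recorded via check_define end up as -D/-U flags in build_ext:
#
#   class my_config(config):
#       @check_define('HAVE_STAT_TV_NSEC')
#       @print_check('Checking for struct stat.st_mtim.tv_nsec')
#       def check_stat_nsec(self):
#           return self.check_struct_member(
#               'struct stat', 'st_mtim.tv_nsec', headers=['sys/stat.h'])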
# Yes, these are in snakeoil.compatibility; however, we can't rely on that
# module since the snakeoil source is in py2 form while this module must stay
# py2/py3 compatible. In other words, it may be invoked by py3 to translate
# snakeoil itself to py3.
is_py3k = sys.version_info >= (3, 0)
is_jython = 'java' in getattr(sys, 'getPlatform', lambda: '')().lower()
| {
"content_hash": "be58438f0da6c09e271beb8ae4cd0d5f",
"timestamp": "",
"source": "github",
"line_count": 1047,
"max_line_length": 105,
"avg_line_length": 35.08309455587393,
"alnum_prop": 0.5874441903517369,
"repo_name": "radhermit/pychroot",
"id": "977728317fd0cd5431d54b7ce7048598c52f20c4",
"size": "36867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkgdist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65064"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('assets', '0012_auto_20170105_0651'),
]
operations = [
migrations.AddField(
model_name='component',
name='install_instructions',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='component',
name='upload_instructions',
field=models.TextField(blank=True, null=True),
),
]
| {
"content_hash": "215b4545566dc71abf36a0c261e34410",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 58,
"avg_line_length": 25,
"alnum_prop": 0.5860869565217391,
"repo_name": "portnov/assethub",
"id": "8518366ac6a9df4f49825d33939059c9d22bd825",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assethub/assets/migrations/0013_auto_20170105_0749.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5879"
},
{
"name": "HTML",
"bytes": "29411"
},
{
"name": "Python",
"bytes": "131052"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
} |
import unittest
import troposphere.rds as rds
class TestRDS(unittest.TestCase):
def test_it_allows_an_rds_instance_created_from_a_snapshot(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
DBSnapshotIdentifier='SomeSnapshotIdentifier'
)
rds_instance.JSONrepr()
def test_it_allows_an_rds_instance_with_master_username_and_password(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword'
)
rds_instance.JSONrepr()
def test_it_rds_instances_require_either_a_snapshot_or_credentials(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL'
)
with self.assertRaisesRegexp(
ValueError,
'Either \(MasterUsername and MasterUserPassword\) or'
' DBSnapshotIdentifier are required'
):
rds_instance.JSONrepr()
def test_it_allows_an_rds_replica(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier'
)
rds_instance.JSONrepr()
def test_replica_settings_are_inherited(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
SourceDBInstanceIdentifier='SomeSourceDBInstanceIdentifier',
BackupRetentionPeriod="1",
DBName="SomeName",
MasterUsername="SomeUsername",
MasterUserPassword="SomePassword",
PreferredBackupWindow="SomeBackupWindow",
MultiAZ=True,
DBSnapshotIdentifier="SomeDBSnapshotIdentifier",
DBSubnetGroupName="SomeDBSubnetGroupName",
)
with self.assertRaisesRegexp(
ValueError,
'BackupRetentionPeriod, DBName, DBSnapshotIdentifier, '
'DBSubnetGroupName, MasterUserPassword, MasterUsername, '
'MultiAZ, PreferredBackupWindow '
'properties can\'t be provided when '
'SourceDBInstanceIdentifier is present '
'AWS::RDS::DBInstance.'
):
rds_instance.JSONrepr()
def test_it_rds_instances_require_encryption_if_kms_key_provided(self):
rds_instance = rds.DBInstance(
'SomeTitle',
AllocatedStorage=1,
DBInstanceClass='db.m1.small',
Engine='MySQL',
MasterUsername='SomeUsername',
MasterUserPassword='SomePassword',
KmsKeyId='arn:aws:kms:us-east-1:123456789012:key/'
'12345678-1234-1234-1234-123456789012'
)
with self.assertRaisesRegexp(
ValueError,
'If KmsKeyId is provided, StorageEncrypted is required'
):
rds_instance.JSONrepr()
| {
"content_hash": "829ca6a471f76fef8083b12837abf5ad",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 34.17,
"alnum_prop": 0.5797483172373427,
"repo_name": "DualSpark/troposphere",
"id": "a7e63e96bf5eb2eb8b7f22a65d428cd02ece2d28",
"size": "3417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_rds.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "239354"
}
],
"symlink_target": ""
} |
"""
PlotgroupPanels for displaying ProjectionSheet plotgroups.
"""
import ImageTk
### JCALERT! Try not to have to use chain and delete this import.
from itertools import chain
from Tkinter import Canvas
from numpy import sometrue
import param
import topo
from topo.base.cf import CFProjection
from topo.base.projection import ProjectionSheet, Projection
from topo.base.generatorsheet import GeneratorSheet
from templateplotgrouppanel import SheetPanel
def cmp_projections(p1,p2):
"""
Comparison function for Projections.
It compares the source sheet precedence first and then the projection objects themselves.
"""
if p1.src.precedence != p2.src.precedence:
return cmp(p1.src.precedence,p2.src.precedence)
else:
return cmp(p1,p2)
UNIT_PADDING = 1
class ProjectionSheetPanel(SheetPanel):
"""
Abstract base class for panels relating to ProjectionSheets.
"""
__abstract = True
sheet_type = ProjectionSheet
projection_type = Projection
@classmethod
def valid_context(cls):
"""
Return True if there is at least one instance of
projection_type among all projections in the simulation.
"""
for p in chain(*[sheet.projections().values()
for sheet in topo.sim.objects(cls.sheet_type).values()]):
if isinstance(p,cls.projection_type):
return True
return False
def __init__(self,master,plotgroup,**params):
super(ProjectionSheetPanel,self).__init__(master,plotgroup,**params)
self.plotgroup.auto_refresh=False
self.pack_param('sheet',parent=self.control_frame_3,
on_modify=self.sheet_change,side='left',expand=1,
widget_options={'new_default':True,
'sort_fn_args':{'cmp':lambda x, y: cmp(-x.precedence,-y.precedence)}})
def setup_plotgroup(self):
super(ProjectionSheetPanel,self).setup_plotgroup()
self.populate_sheet_param()
self.populate_color_channel_param()
def sheet_change(self):
self.refresh_plots()
def populate_sheet_param(self):
sheets = [s for s in topo.sim.objects(self.sheet_type).values()
if sometrue([isinstance(p,self.projection_type)
for p in s.in_connections])]
self.plotgroup.params()['sheet'].objects = sheets
self.plotgroup.sheet = sheets[0] # CB: necessary?
class ProjectionActivityPanel(ProjectionSheetPanel):
def __init__(self,master,plotgroup,**params):
super(ProjectionActivityPanel,self).__init__(master,plotgroup,**params)
self.auto_refresh = True
def _plot_title(self):
return "Activity in projections to %s at time %s"%(self.plotgroup.sheet.name,topo.sim.timestr(self.plotgroup.time))
class UnitsPanel(ProjectionSheetPanel):
def __init__(self,master,plotgroup,**params):
self.initial_args=params # CEBALERT: store the initial arguments so we can get sheet,x,y in
# sheet_change if any of them were specified. Isn't there a cleaner
# way?
super(UnitsPanel,self).__init__(master,plotgroup,**params)
self.pack_param('x',parent=self.control_frame_4,on_set=self.refresh_plots)
self.pack_param('y',parent=self.control_frame_4,on_set=self.refresh_plots)
self.sheet_change()
##############################################################################
# CEBALERT:
# - Need to couple taggedslider to a Number parameter in a better way
# somewhere else.
# - Clean up or document: passing the params, setting the bounds
#
# Also:
# e.g. bound on parameter is 0.5 but means <0.5, taggedslider
# still lets you set to 0.5 -> error
def sheet_change(self):
# CEBHACKALERT: get an inconsequential but scary
# cf-out-of-range error if you e.g. set y < -0.4 on sheet V1
# and then change to V2 (which has smaller bounds).
# x and y don't seem to be updated in time...
#self.x,self.y = 0.0,0.0
# CEBALERT: need to crop x,y (for e.g. going to smaller sheet) rather
# than set to 0
if 'sheet' in self.initial_args: self.sheet=self.initial_args['sheet']
for coord in ['x','y']:
self._tkvars[coord].set(self.initial_args.get(coord,0.0))
l,b,r,t = self.sheet.bounds.lbrt()
# CEBALERT: see "CEBERRORALERT: doesn't take account of
# exclusive bounds" in topo/param/__init.__.py.
D=0.0000000001
bounds = {'x':(l,r-D),
'y':(b,t-D)}
inclusive_bounds = {'x':(True,False), # GUI knows about exclusive sheet bounds
'y':(False,True)}
for coord in ['x','y']:
param_obj=self.get_parameter_object(coord)
param_obj.bounds = bounds[coord]
param_obj.inclusive_bounds = inclusive_bounds[coord]
# (method can be called before x,y widgets added)
if coord in self.representations:
w=self.representations[coord]['widget']
w.set_bounds(param_obj.bounds[0],param_obj.bounds[1],
inclusive_bounds=param_obj.inclusive_bounds)
w.tag_set()
self.initial_args = {} # reset now we've used them
super(UnitsPanel,self).sheet_change()
##############################################################################
class ConnectionFieldsPanel(UnitsPanel):
projection_type = CFProjection
def __init__(self,master,plotgroup,**params):
super(ConnectionFieldsPanel,self).__init__(master,plotgroup,**params)
self.pack_param('situate',parent=self.control_frame_3,on_set=self.situate_change,side='left',expand=1)
def situate_change(self):
self.redraw_plots()
def _plot_title(self):
return 'Connection Fields of %s unit (%4.3f,%4.3f) at time %s' % \
(self.sheet.name, self.plotgroup.x,self.plotgroup.y,
topo.sim.timestr(self.plotgroup.time))
class PlotMatrixPanel(ProjectionSheetPanel):
"""
PlotGroupPanel for visualizing an array of bitmaps, such as for
a projection involving a matrix of units.
"""
gui_desired_maximum_plot_height = param.Integer(default=5,bounds=(0,None),doc="""
Value to provide for PlotGroup.desired_maximum_plot_height for
PlotGroups opened by the GUI. Determines the initial, default
scaling for the PlotGroup.""")
def sheet_change(self): # don't want to refresh_plots (measure new data) each time
self.redraw_plots()
def refresh(self,update=True):
super(PlotMatrixPanel,self).refresh(update)
# take the size of the plot as the desired size
self.plotgroup.update_maximum_plot_height()
self.desired_maximum_plot_height = self.plotgroup.maximum_plot_height
def display_plots(self):
"""
CFProjectionPanel requires a 2D grid of plots.
"""
plots=self.plotgroup.plots
# Generate the zoomed images.
self.zoomed_images = [ImageTk.PhotoImage(p.bitmap.image)
for p in plots]
old_canvases = self.canvases
self.canvases = [Canvas(self.plot_container,
width=image.width(),
height=image.height(),
borderwidth=1,highlightthickness=0,
relief='groove')
for image in self.zoomed_images]
# Lay out images
for i,image,canvas in zip(range(len(self.zoomed_images)),
self.zoomed_images,self.canvases):
canvas.grid(row=i//self.plotgroup.proj_plotting_shape[1],
column=i%self.plotgroup.proj_plotting_shape[1],
padx=UNIT_PADDING,pady=UNIT_PADDING)
canvas.create_image(1,1,anchor='nw',image=image)
# Delete old ones. This may resize the grid.
for c in old_canvases:
c.grid_forget()
self._add_canvas_bindings()
def display_labels(self):
"""Do not display labels for these plots."""
pass
class RFProjectionPanel(PlotMatrixPanel):
sheet_type = ProjectionSheet
def __init__(self,master,plotgroup,**params):
super(RFProjectionPanel,self).__init__(master,plotgroup,**params)
self.pack_param('input_sheet',parent=self.control_frame_3,
on_modify=self.redraw_plots,side='left',expand=1)
self.pack_param('density',parent=self.control_frame_4)
def setup_plotgroup(self):
super(RFProjectionPanel,self).setup_plotgroup()
self.populate_input_sheet_param()
def populate_input_sheet_param(self):
sheets = topo.sim.objects(GeneratorSheet).values()
self.plotgroup.params()['input_sheet'].objects = sheets
self.plotgroup.input_sheet=sheets[0]
def _plot_title(self):
return 'RFs of %s on %s at time %s'%(self.sheet.name,self.plotgroup.input_sheet.name,
topo.sim.timestr(self.plotgroup.time))
class ProjectionPanel(PlotMatrixPanel):
def __init__(self,master,plotgroup,**params):
super(ProjectionPanel,self).__init__(master,plotgroup,**params)
self.pack_param('projection',parent=self.control_frame_3,
on_modify=self.redraw_plots,side='left',expand=1,
widget_options={'sort_fn_args':{'cmp':cmp_projections},
'new_default':True})
self.pack_param('density',parent=self.control_frame_4)
def _plot_title(self):
return self.projection.name + ' projection from ' + self.projection.src.name + ' to ' \
+ self.sheet.name + ' at time ' + topo.sim.timestr(self.plotgroup.time)
def setup_plotgroup(self):
super(ProjectionPanel,self).setup_plotgroup()
self.populate_projection_param()
def sheet_change(self):
self.refresh_projections()
super(ProjectionPanel,self).sheet_change()
def populate_projection_param(self):
prjns = [proj for proj in self.plotgroup.sheet.projections().values()
if isinstance(proj,self.projection_type)]
self.plotgroup.params()['projection'].objects = prjns
self.plotgroup.projection = prjns[0] # CB: necessary?
def refresh_projections(self):
self.populate_projection_param()
self.update_selector('projection')
## #################
## # CEBALERT: How do you change list of tkinter.optionmenu options? Use pmw's optionmenu?
## # Or search the web for a way to alter the list in the tkinter one.
## # Currently, replace widget completely: looks bad and is complex.
## # When fixing, remove try/except marked by the 'for projectionpanel' CEBALERT in
## # tkparameterizedobject.py.
## if 'projection' in self.representations:
## w = self.representations['projection']['widget']
## l = self.representations['projection']['label']
## l.destroy()
## w.destroy()
## self.pack_param('projection',parent=self.representations['projection']['frame'],
## on_modify=self.refresh_plots,side='left',expand=1,
## widget_options={'sort_fn_args':{'cmp':cmp_projections},
## 'new_default':True})
## #################
class CFProjectionPanel(ProjectionPanel):
"""
Panel for displaying CFProjections.
"""
projection_type = CFProjection
def __init__(self,master,plotgroup,**params):
super(CFProjectionPanel,self).__init__(master,plotgroup,**params)
self.pack_param('situate',parent=self.control_frame_3,on_set=self.situate_change,side='left',expand=1)
def situate_change(self):
self.redraw_plots()
## Need to add test file:
# check projection, sheet ordering
# check sheet changes, check projection changes
| {
"content_hash": "1781f3534f789705fdc43ca25626d213",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 123,
"avg_line_length": 35.4231884057971,
"alnum_prop": 0.6036330905817855,
"repo_name": "Tasignotas/topographica_mirror",
"id": "776b74de6ee47ecc11917aa9f5fd56d238708e3d",
"size": "12221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topo/tkgui/projectionpanel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "13313"
},
{
"name": "C++",
"bytes": "5745"
},
{
"name": "Elixir",
"bytes": "202"
},
{
"name": "JavaScript",
"bytes": "122"
},
{
"name": "Makefile",
"bytes": "15490"
},
{
"name": "Python",
"bytes": "1869234"
},
{
"name": "Shell",
"bytes": "1621"
},
{
"name": "TeX",
"bytes": "253834"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('place', '0009_auto_20141208_0426'),
('place', '0010_place_google_rating'),
]
operations = [
]
| {
"content_hash": "db803f7c829c9efa5cc63355b713d246",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 46,
"avg_line_length": 19.285714285714285,
"alnum_prop": 0.6333333333333333,
"repo_name": "pizzapanther/Localvore",
"id": "737027c36ffc197ca74f4462d2aaadbc115b70e1",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django-app/place/migrations/0011_merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "607136"
},
{
"name": "CoffeeScript",
"bytes": "3263"
},
{
"name": "JavaScript",
"bytes": "74606"
},
{
"name": "Python",
"bytes": "38363"
},
{
"name": "Shell",
"bytes": "4789"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: 3df3378fa9b0
Revises: 45f66908a95b
Create Date: 2015-04-10 16:04:32.131733
"""
# revision identifiers, used by Alembic.
revision = '3df3378fa9b0'
down_revision = '45f66908a95b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('record', sa.Column('ndvi_modis', postgresql.ARRAY(sa.Integer()), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('record', 'ndvi_modis')
### end Alembic commands ###
| {
"content_hash": "4c408a9ce66a25bd9db2f09f368e3af1",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 99,
"avg_line_length": 25.884615384615383,
"alnum_prop": 0.7057949479940565,
"repo_name": "justinwp/croplands",
"id": "f68f9aa43393c231cc7378856264d986cabf3c23",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/3df3378fa9b0_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21974"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PLpgSQL",
"bytes": "553"
},
{
"name": "Python",
"bytes": "236431"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.sites.models import Site
from djblets.siteconfig.models import SiteConfiguration
from reviewboard import get_version_string
from reviewboard.admin.siteconfig import settings_map, defaults
def init_siteconfig(app, created_models, verbosity, **kwargs):
"""
Initializes the site configuration with the current version of the
software.
"""
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
# This is an initial syncdb and we got called before Site's post_syncdb
# handler did, so invoke it directly.
from django.contrib.sites.management import create_default_site
create_default_site(app, created_models, verbosity)
site = Site.objects.get_current()
siteconfig, is_new = SiteConfiguration.objects.get_or_create(site=site)
new_version = get_version_string()
if is_new:
# Check the Site to see if this is a brand new installation. If so,
# don't talk to the user about upgrades or other such nonsense.
if Site not in created_models:
print "*** Migrating settings from settings_local.py to the " \
"database."
migrate_settings(siteconfig)
if Site not in created_models:
print "*** If you have previously configured Review Board " \
"through a "
print "*** settings_local.py file, please ensure that the " \
"migration "
print "*** was successful by verifying your settings at"
print "*** %s://%s%sadmin/settings/" % \
(siteconfig.get("site_domain_method"),
site.domain,
settings.SITE_ROOT)
siteconfig.version = new_version
siteconfig.save()
elif siteconfig.version != new_version:
print "Upgrading Review Board from %s to %s" % (siteconfig.version,
new_version)
siteconfig.version = new_version
siteconfig.save()
# TODO: Someday we'll want to enable this code when we actually
# have something to tell them.
#print "*** You are upgrading Review Board from v%s to v%s" % \
# (siteconfig.version, new_version)
#print "*** To complete the upgrade, please follow the instructions at:"
#print "*** %s://%s%s" % (siteconfig.get("site_domain_method"),
# site.domain, settings.SITE_ROOT)
migration_table = {
# new settings key # old settings key
'auth_require_sitewide_login': 'REQUIRE_SITEWIDE_LOGIN',
'diffviewer_context_num_lines': 'DIFF_CONTEXT_NUM_LINES',
'diffviewer_include_space_patterns': 'DIFF_INCLUDE_SPACE_PATTERNS',
'diffviewer_paginate_by': 'DIFFVIEWER_PAGINATE_BY',
'diffviewer_paginate_orphans': 'DIFFVIEWER_PAGINATE_ORPHANS',
'diffviewer_syntax_highlighting': 'DIFF_SYNTAX_HIGHLIGHTING',
'mail_send_review_mail': 'SEND_REVIEW_MAIL',
'search_enable': 'ENABLE_SEARCH',
'search_index_file': 'SEARCH_INDEX',
}
migration_table.update(settings_map)
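# Entries merged in from settings_map may also use the dict form handled in
# migrate_settings() below, e.g. (key names here are purely illustrative):
#
#   'site_media_url': {'key': 'MEDIA_URL', 'serialize_func': unicode}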
auth_backend_map = {
'django.contrib.auth.backends.ModelBackend': 'builtin',
'reviewboard.accounts.backends.NISBackend': 'nis',
'reviewboard.accounts.backends.LDAPBackend': 'ldap',
}
def migrate_settings(siteconfig):
"""
Migrates any settings we want in the database from the settings file.
"""
# Convert everything in the table.
for siteconfig_key, setting_data in migration_table.iteritems():
if isinstance(setting_data, dict):
setting_key = setting_data['key']
serialize_func = setting_data.get('serialize_func', None)
else:
setting_key = setting_data
serialize_func = None
default = defaults.get(siteconfig_key, None)
value = getattr(settings, setting_key, default)
if serialize_func and callable(serialize_func):
value = serialize_func(value)
siteconfig.set(siteconfig_key, value)
# This may be a tuple in a tuple, or it may just be a tuple.
if type(settings.ADMINS[0]) == tuple:
admin = settings.ADMINS[0]
else:
admin = settings.ADMINS
siteconfig.set('site_admin_name', admin[0])
siteconfig.set('site_admin_email', admin[1])
# Try to transform the authentication backend
remaining_backends = []
known_backends = []
for auth_backend in settings.AUTHENTICATION_BACKENDS:
if auth_backend in auth_backend_map:
known_backends.append(auth_backend)
else:
remaining_backends.append(auth_backend)
if remaining_backends or len(known_backends) > 1:
# The user has some custom backend set. Just set the entire list
siteconfig.set('auth_backend', 'custom')
siteconfig.set('auth_custom_backends',
settings.AUTHENTICATION_BACKENDS)
elif len(known_backends) == 1:
siteconfig.set('auth_backend', auth_backend_map[known_backends[0]])
else:
siteconfig.set('auth_backend', 'builtin')
| {
"content_hash": "a7f840834616992378531925f2b8c177",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 80,
"avg_line_length": 38.762962962962966,
"alnum_prop": 0.6239250907701127,
"repo_name": "asutherland/opc-reviewboard",
"id": "40e2553e5db51a2a7f5c9209716bf452c934b947",
"size": "5233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/admin/management/sites.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "101161"
},
{
"name": "JavaScript",
"bytes": "232746"
},
{
"name": "Python",
"bytes": "687315"
},
{
"name": "Shell",
"bytes": "1616"
}
],
"symlink_target": ""
} |
import six
import shlex
from . import base
class Dmenu(base.RunCommand):
"""
Python wrapper for dmenu
http://tools.suckless.org/dmenu/
"""
defaults = [
("dmenu_font", None, "override the default 'font' and 'fontsize' options for dmenu"),
# NOTE: Do not use a list as a default value, since it would be shared
# among all the objects inheriting this class, and if one of them
# modified it, all the other objects would see the modified list;
# use a string or a tuple instead, which are immutable
("dmenu_command", 'dmenu', "the dmenu command to be launched"),
("dmenu_bottom", False, "dmenu appears at the bottom of the screen"),
("dmenu_ignorecase", False, "dmenu matches menu items case insensitively"),
("dmenu_lines", None, "dmenu lists items vertically, with the given number of lines"),
("dmenu_prompt", None, "defines the prompt to be displayed to the left of the input field"),
("dmenu_height", None, "defines the height (only supported by some dmenu forks)"),
]
def __init__(self, **config):
base.RunCommand.__init__(self, **config)
self.add_defaults(Dmenu.defaults)
def _configure(self, qtile):
base.RunCommand._configure(self, qtile)
dmenu_command = self.dmenu_command or self.command
if isinstance(dmenu_command, str):
self.configured_command = shlex.split(dmenu_command)
else:
# Create a clone of dmenu_command, don't use it directly since
# it's shared among all the instances of this class
self.configured_command = list(dmenu_command)
if self.dmenu_bottom:
self.configured_command.append("-b")
if self.dmenu_ignorecase:
self.configured_command.append("-i")
if self.dmenu_lines:
self.configured_command.extend(("-l", str(self.dmenu_lines)))
if self.dmenu_prompt:
self.configured_command.extend(("-p", self.dmenu_prompt))
if self.dmenu_font:
font = self.dmenu_font
elif self.font:
if self.fontsize:
font = '{}-{}'.format(self.font, self.fontsize)
else:
font = self.font
self.configured_command.extend(("-fn", font))
if self.background:
self.configured_command.extend(("-nb", self.background))
if self.foreground:
self.configured_command.extend(("-nf", self.foreground))
if self.selected_background:
self.configured_command.extend(("-sb", self.selected_background))
if self.selected_foreground:
self.configured_command.extend(("-sf", self.selected_foreground))
# NOTE: The original dmenu doesn't support the '-h' option
if self.dmenu_height:
self.configured_command.extend(("-h", str(self.dmenu_height)))
def run(self, items=None):
if items:
if self.dmenu_lines:
lines = min(len(items), self.dmenu_lines)
else:
lines = len(items)
self.configured_command.extend(("-l", str(lines)))
proc = super(Dmenu, self).run()
if items:
input_str = "\n".join([six.u(i) for i in items]) + "\n"
return proc.communicate(str.encode(input_str))[0].decode('utf-8')
return proc
class DmenuRun(Dmenu):
"""
Special case to run applications.
config.py should have something like:
.. code-block:: python
from libqtile import extension
keys = [
Key(['mod4'], 'r', lazy.run_extension(extension.DmenuRun(
dmenu_prompt=">",
dmenu_font="Andika-8",
background="#15181a",
foreground="#00ff00",
selected_background="#079822",
selected_foreground="#fff",
dmenu_height=24, # Only supported by some dmenu forks
))),
]
"""
defaults = [
("dmenu_command", 'dmenu_run', "the dmenu command to be launched"),
]
def __init__(self, **config):
Dmenu.__init__(self, **config)
self.add_defaults(DmenuRun.defaults)
def _configure(self, qtile):
Dmenu._configure(self, qtile)
class J4DmenuDesktop(Dmenu):
"""
Python wrapper for j4-dmenu-desktop
https://github.com/enkore/j4-dmenu-desktop
"""
defaults = [
("j4dmenu_command", 'j4-dmenu-desktop', "the dmenu command to be launched"),
("j4dmenu_use_xdg_de", False, "read $XDG_CURRENT_DESKTOP to determine the desktop environment"),
("j4dmenu_display_binary", False, "display binary name after each entry"),
("j4dmenu_generic", True, "include the generic name of desktop entries"),
("j4dmenu_terminal", None, "terminal emulator used to start terminal apps"),
("j4dmenu_usage_log", None, "file used to sort items by usage frequency"),
]
def __init__(self, **config):
Dmenu.__init__(self, **config)
self.add_defaults(J4DmenuDesktop.defaults)
def _configure(self, qtile):
Dmenu._configure(self, qtile)
self.configured_command = [self.j4dmenu_command, '--dmenu',
" ".join(shlex.quote(arg) for arg in self.configured_command)]
if self.j4dmenu_use_xdg_de:
self.configured_command.append("--use-xdg-de")
if self.j4dmenu_display_binary:
self.configured_command.append("--display-binary")
if not self.j4dmenu_generic:
self.configured_command.append("--no-generic")
if self.j4dmenu_terminal:
self.configured_command.extend(("--term", self.j4dmenu_terminal))
if self.j4dmenu_usage_log:
self.configured_command.extend(("--usage-log",
self.j4dmenu_usage_log))
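# A hypothetical config.py binding, mirroring the DmenuRun example above
# (the key binding and options shown are illustrative only):
#
#   from libqtile import extension
#
#   keys = [
#       Key(['mod4'], 'd', lazy.run_extension(extension.J4DmenuDesktop(
#           dmenu_prompt="apps>",
#           dmenu_lines=10,
#       ))),
#   ]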
| {
"content_hash": "78a3fc1a222a810d574abe1dd984aa3d",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 104,
"avg_line_length": 37.87261146496815,
"alnum_prop": 0.5893037336024218,
"repo_name": "cortesi/qtile",
"id": "167b7e0d188c79f2a82f34fb3cab47186f096b58",
"size": "7038",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libqtile/extension/dmenu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1170921"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "5643"
}
],
"symlink_target": ""
} |
import json
import pytest
from telegram import (InlineQueryResultCachedMpeg4Gif, InlineKeyboardButton,
InputTextMessageContent, InlineKeyboardMarkup, InlineQueryResultCachedVoice)
@pytest.fixture(scope='class')
def inline_query_result_cached_mpeg4_gif():
return InlineQueryResultCachedMpeg4Gif(TestInlineQueryResultCachedMpeg4Gif.id,
TestInlineQueryResultCachedMpeg4Gif.mpeg4_file_id,
title=TestInlineQueryResultCachedMpeg4Gif.title,
caption=TestInlineQueryResultCachedMpeg4Gif.caption,
input_message_content=TestInlineQueryResultCachedMpeg4Gif.input_message_content,
reply_markup=TestInlineQueryResultCachedMpeg4Gif.reply_markup)
class TestInlineQueryResultCachedMpeg4Gif:
id = 'id'
type = 'mpeg4_gif'
mpeg4_file_id = 'mpeg4 file id'
title = 'title'
caption = 'caption'
input_message_content = InputTextMessageContent('input_message_content')
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])
def test_expected_values(self, inline_query_result_cached_mpeg4_gif):
assert inline_query_result_cached_mpeg4_gif.type == self.type
assert inline_query_result_cached_mpeg4_gif.id == self.id
assert inline_query_result_cached_mpeg4_gif.mpeg4_file_id == self.mpeg4_file_id
assert inline_query_result_cached_mpeg4_gif.title == self.title
assert inline_query_result_cached_mpeg4_gif.caption == self.caption
assert inline_query_result_cached_mpeg4_gif.input_message_content.to_dict() == \
self.input_message_content.to_dict()
assert inline_query_result_cached_mpeg4_gif.reply_markup.to_dict() == \
self.reply_markup.to_dict()
def test_to_json(self, inline_query_result_cached_mpeg4_gif):
json.loads(inline_query_result_cached_mpeg4_gif.to_json())
def test_to_dict(self, inline_query_result_cached_mpeg4_gif):
inline_query_result_cached_mpeg4_gif_dict = inline_query_result_cached_mpeg4_gif.to_dict()
assert isinstance(inline_query_result_cached_mpeg4_gif_dict, dict)
assert inline_query_result_cached_mpeg4_gif_dict['type'] == \
inline_query_result_cached_mpeg4_gif.type
assert inline_query_result_cached_mpeg4_gif_dict['id'] == \
inline_query_result_cached_mpeg4_gif.id
assert inline_query_result_cached_mpeg4_gif_dict['mpeg4_file_id'] == \
inline_query_result_cached_mpeg4_gif.mpeg4_file_id
assert inline_query_result_cached_mpeg4_gif_dict['title'] == \
inline_query_result_cached_mpeg4_gif.title
assert inline_query_result_cached_mpeg4_gif_dict['caption'] == \
inline_query_result_cached_mpeg4_gif.caption
assert inline_query_result_cached_mpeg4_gif_dict['input_message_content'] == \
inline_query_result_cached_mpeg4_gif.input_message_content.to_dict()
assert inline_query_result_cached_mpeg4_gif_dict['reply_markup'] == \
inline_query_result_cached_mpeg4_gif.reply_markup.to_dict()
def test_equality(self):
a = InlineQueryResultCachedMpeg4Gif(self.id, self.mpeg4_file_id)
b = InlineQueryResultCachedMpeg4Gif(self.id, self.mpeg4_file_id)
c = InlineQueryResultCachedMpeg4Gif(self.id, "")
d = InlineQueryResultCachedMpeg4Gif("", self.mpeg4_file_id)
e = InlineQueryResultCachedVoice(self.id, "", "")
assert a == b
assert hash(a) == hash(b)
assert a is not b
assert a == c
assert hash(a) == hash(c)
assert a != d
assert hash(a) != hash(d)
assert a != e
assert hash(a) != hash(e)
| {
"content_hash": "62238a3423d6b02efa1048241fc97299",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 123,
"avg_line_length": 50.08860759493671,
"alnum_prop": 0.639373262572656,
"repo_name": "rogerscristo/BotFWD",
"id": "ff66807772485678c994559ed713d694e330a78e",
"size": "4787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/pytests/test_inlinequeryresultcachedmpeg4gif.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13999"
}
],
"symlink_target": ""
} |
import sys
from EPPs.common import StepEPP
from pyclarity_lims.entities import Protocol
class AssignNextStepKAPAqPCR(StepEPP):
"""
This script assigns the next step for samples based on the QC flag of the input artifact and the step UDF
standard curve result.
"""
_use_load_config = False # prevent the loading of the config
def _run(self):
# obtain the actions of the step then creates a StepActions entity for the current step
actions = self.process.step.actions
# obtain the next actions in the step then creates a list of dict for next_actions for the step
next_actions = actions.next_actions
# abort script if the Standard Curve has not been created
if not self.process.udf.get('Standard Curve Result'):
print('No value in step UDF "Standard Curve Result". Please complete result parsing and linear regression.')
sys.exit(1)
else:
for next_action in next_actions:
art = next_action['artifact']
if art.name.split(' ')[0] == 'QSTD' or art.name.split(' ')[0] == 'No':
# standards and no template control should never proceed to the next step or repeat
next_action['action'] = 'remove'
else:
if self.process.udf.get('Standard Curve Result') == 'Repeat Make and Read qPCR Quant':
# check if the Standard Curve passed QC. If not then step should be repeated
next_action['action'] = 'repeat'
elif self.process.udf.get('Standard Curve Result') == 'Pass QSTD Curve':
current_step = self.process.step.configuration # configuration gives the ProtocolStep entity.
protocol = Protocol(self.process.lims,
uri='/'.join(self.process.step.configuration.uri.split('/')[:-2]))
steps = protocol.steps # a list of all the ProtocolSteps in protocol
step_object = steps[steps.index(current_step) + 1] # find the next step
next_action['action'] = 'nextstep'
next_action['step'] = step_object
actions.put()
if __name__ == '__main__':
AssignNextStepKAPAqPCR().run()
| {
"content_hash": "7d3fb460575f35b8c8624d086a9d151c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 120,
"avg_line_length": 46.13725490196079,
"alnum_prop": 0.589035274118147,
"repo_name": "EdinburghGenomics/clarity_scripts",
"id": "3fc8f2611f482565a3b26d41b33da09a78ae0af8",
"size": "2375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/next_step_assignment_kapa_qpcr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2317"
},
{
"name": "Python",
"bytes": "602935"
}
],
"symlink_target": ""
} |
from system.core.controller import *
from random import randint
from time import strftime
class Login_Reg(Controller):
def __init__(self, action):
super(Login_Reg, self).__init__(action)
self.load_model('Login_Reg')
self.db = self._app.db
def index(self):
return self.load_view('index.html')
def login(self):
return redirect("/success")
def register(self):
session['fname'] = request.form['fname']
session['lname'] = request.form['lname']
session['email'] = request.form['email']
user_info = {
"fname" : request.form['fname'],
"lname" : request.form['lname'],
"email" : request.form['email'],
"password" : request.form['password'],
"conf_password" : request.form['conf_password']
}
create_status = self.models['Login_Reg'].create_user(user_info)
if create_status['status']:
return redirect("/success")
for error in create_status['errors']:
flash(error,'register')
return redirect("/")
def login(self):
session['email'] = request.form['email']
info= {
"email": request.form['email'],
"password": request.form['password']
}
errors = self.models['Login_Reg'].login_valid(info)
if not errors['status']:
for error in errors['errors']:
flash(error,"login")
return redirect("/")
return redirect("/success")
def success(self):
return self.load_view('success.html')
def logout(self):
session.clear()
return redirect("/")
| {
"content_hash": "7e74291bef926a973ca9ce60b6b6acd3",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 71,
"avg_line_length": 31.339622641509433,
"alnum_prop": 0.5611077664057796,
"repo_name": "authman/Python201609",
"id": "87414dcb7857560332b39348d9b4658cbb9aa4eb",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Wright_Will/Assignments/login_and_registration_pylot/Pylot/app/controllers/Login_Reg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1231"
},
{
"name": "C",
"bytes": "430679"
},
{
"name": "C++",
"bytes": "21416"
},
{
"name": "CSS",
"bytes": "22689"
},
{
"name": "HTML",
"bytes": "168012"
},
{
"name": "JavaScript",
"bytes": "3734"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "590654"
},
{
"name": "Shell",
"bytes": "9350"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import unittest
from unittest import mock
import pytest
import telegram
import airflow
from airflow.models import Connection
from airflow.providers.telegram.hooks.telegram import TelegramHook
from airflow.utils import db
TELEGRAM_TOKEN = "dummy token"
class TestTelegramHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id="telegram-webhook-without-token",
conn_type="http",
)
)
db.merge_conn(
Connection(
conn_id="telegram_default",
conn_type="http",
password=TELEGRAM_TOKEN,
)
)
db.merge_conn(
Connection(
conn_id="telegram-webhook-with-chat_id",
conn_type="http",
password=TELEGRAM_TOKEN,
host="-420913222",
)
)
def test_should_raise_exception_if_both_connection_or_token_is_not_provided(self):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
TelegramHook()
assert "Cannot get token: No valid Telegram connection supplied." == str(ctx.value)
def test_should_raise_exception_if_conn_id_doesnt_exist(self):
with pytest.raises(airflow.exceptions.AirflowNotFoundException) as ctx:
TelegramHook(telegram_conn_id="telegram-webhook-non-existent")
assert "The conn_id `telegram-webhook-non-existent` isn't defined" == str(ctx.value)
def test_should_raise_exception_if_conn_id_doesnt_contain_token(self):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
TelegramHook(telegram_conn_id="telegram-webhook-without-token")
assert "Missing token(password) in Telegram connection" == str(ctx.value)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_raise_exception_if_chat_id_is_not_provided_anywhere(self, mock_get_conn):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
hook = TelegramHook(telegram_conn_id="telegram_default")
hook.send_message({"text": "test telegram message"})
assert "'chat_id' must be provided for telegram message" == str(ctx.value)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_raise_exception_if_message_text_is_not_provided(self, mock_get_conn):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
hook = TelegramHook(telegram_conn_id="telegram_default")
hook.send_message({"chat_id": -420913222})
assert "'text' must be provided for telegram message" == str(ctx.value)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_all_parameters_are_correctly_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram_default")
hook.send_message({"chat_id": -420913222, "text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": -420913222,
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_chat_id_is_provided_through_constructor(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram_default", chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": -420913222,
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_chat_id_is_provided_in_connection(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id="telegram-webhook-with-chat_id")
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": "-420913222",
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_retry_when_any_telegram_error_is_encountered(self, mock_get_conn):
        expected_retry_count = 5
mock_get_conn.return_value = mock.Mock(password="some_token")
def side_effect(*args, **kwargs):
raise telegram.error.TelegramError("cosmic rays caused bit flips")
mock_get_conn.return_value.send_message.side_effect = side_effect
with pytest.raises(Exception) as ctx:
hook = TelegramHook(telegram_conn_id="telegram-webhook-with-chat_id")
hook.send_message({"text": "test telegram message"})
assert "RetryError" in str(ctx.value)
assert "state=finished raised TelegramError" in str(ctx.value)
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_with(
**{
"chat_id": "-420913222",
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
        assert expected_retry_count == mock_get_conn.return_value.send_message.call_count
@mock.patch("airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn")
def test_should_send_message_if_token_is_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(token=TELEGRAM_TOKEN, chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
"chat_id": -420913222,
"parse_mode": "HTML",
"disable_web_page_preview": True,
"text": "test telegram message",
}
)
| {
"content_hash": "b0addaa329c4b62c27c86f802768463d",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 97,
"avg_line_length": 40.31073446327684,
"alnum_prop": 0.6267694463910302,
"repo_name": "nathanielvarona/airflow",
"id": "a915f6d3285ee4a22352abd34d859b8fdf52d522",
"size": "7922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/telegram/hooks/test_telegram.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "70681"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173025"
},
{
"name": "JavaScript",
"bytes": "142848"
},
{
"name": "Jinja",
"bytes": "38895"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23169682"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211967"
},
{
"name": "TypeScript",
"bytes": "484556"
}
],
"symlink_target": ""
} |
import collections
import copy
import json
import os
import pipes
import re
import subprocess
import sys
import bb_utils
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pylib import constants
CHROMIUM_COVERAGE_BUCKET = 'chromium-code-coverage'
_BotConfig = collections.namedtuple(
'BotConfig', ['bot_id', 'host_obj', 'test_obj'])
HostConfig = collections.namedtuple(
'HostConfig',
['script', 'host_steps', 'extra_args', 'extra_gyp_defines', 'target_arch'])
TestConfig = collections.namedtuple('Tests', ['script', 'tests', 'extra_args'])
def BotConfig(bot_id, host_object, test_object=None):
return _BotConfig(bot_id, host_object, test_object)
def DictDiff(d1, d2):
diff = []
for key in sorted(set(d1.keys() + d2.keys())):
if key in d1 and d1[key] != d2.get(key):
diff.append('- %s=%s' % (key, pipes.quote(d1[key])))
if key in d2 and d2[key] != d1.get(key):
diff.append('+ %s=%s' % (key, pipes.quote(d2[key])))
return '\n'.join(diff)
def GetEnvironment(host_obj, testing, extra_env_vars=None):
init_env = dict(os.environ)
init_env['GYP_GENERATORS'] = 'ninja'
init_env['GOMA_DIR'] = bb_utils.GOMA_DIR
if extra_env_vars:
init_env.update(extra_env_vars)
envsetup_cmd = '. build/android/envsetup.sh'
if host_obj.target_arch:
envsetup_cmd += ' --target-arch=%s' % host_obj.target_arch
if testing:
# Skip envsetup to avoid presubmit dependence on android deps.
print 'Testing mode - skipping "%s"' % envsetup_cmd
envsetup_cmd = ':'
else:
print 'Running %s' % envsetup_cmd
proc = subprocess.Popen(['bash', '-exc',
envsetup_cmd + ' >&2; python build/android/buildbot/env_to_json.py'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=bb_utils.CHROME_SRC, env=init_env)
json_env, envsetup_output = proc.communicate()
if proc.returncode != 0:
print >> sys.stderr, 'FATAL Failure in envsetup.'
print >> sys.stderr, envsetup_output
sys.exit(1)
env = json.loads(json_env)
env['GYP_DEFINES'] = env.get('GYP_DEFINES', '') + ' fastbuild=1'
extra_gyp = host_obj.extra_gyp_defines
if extra_gyp:
env['GYP_DEFINES'] += ' %s' % extra_gyp
if re.search('(asan|clang)=1', extra_gyp):
env.pop('CXX_target', None)
# Bots checkout chrome in /b/build/slave/<name>/build/src
build_internal_android = os.path.abspath(os.path.join(
bb_utils.CHROME_SRC, '..', '..', '..', '..', '..', 'build_internal',
'scripts', 'slave', 'android'))
if os.path.exists(build_internal_android):
env['PATH'] = os.pathsep.join([build_internal_android, env['PATH']])
return env
def GetCommands(options, bot_config):
"""Get a formatted list of commands.
Args:
options: Options object.
bot_config: A BotConfig named tuple.
Returns:
list of Command objects.
"""
property_args = bb_utils.EncodeProperties(options)
commands = [[bot_config.host_obj.script,
'--steps=%s' % ','.join(bot_config.host_obj.host_steps)] +
property_args + (bot_config.host_obj.extra_args or [])]
test_obj = bot_config.test_obj
if test_obj:
run_test_cmd = [test_obj.script, '--reboot'] + property_args
for test in test_obj.tests:
run_test_cmd.extend(['-f', test])
if test_obj.extra_args:
run_test_cmd.extend(test_obj.extra_args)
commands.append(run_test_cmd)
return commands
def GetBotStepMap():
compile_step = ['compile']
std_host_tests = ['check_webview_licenses', 'findbugs']
std_build_steps = ['compile', 'zip_build']
std_test_steps = ['extract_build']
std_tests = ['ui', 'unit']
flakiness_server = (
'--flakiness-server=%s' % constants.UPSTREAM_FLAKINESS_SERVER)
experimental = ['--experimental']
B = BotConfig
H = (lambda steps, extra_args=None, extra_gyp=None, target_arch=None :
HostConfig('build/android/buildbot/bb_host_steps.py', steps, extra_args,
extra_gyp, target_arch))
T = (lambda tests, extra_args=None :
TestConfig('build/android/buildbot/bb_device_steps.py', tests,
extra_args))
bot_configs = [
# Main builders
B('main-builder-dbg', H(std_build_steps + std_host_tests)),
B('main-builder-rel', H(std_build_steps)),
B('main-clang-builder',
H(compile_step, extra_gyp='clang=1 component=shared_library')),
B('main-clobber', H(compile_step)),
B('main-tests', H(std_test_steps), T(std_tests, [flakiness_server])),
# Other waterfalls
B('asan-builder-tests', H(compile_step, extra_gyp='asan=1'),
T(std_tests, ['--asan'])),
B('blink-try-builder', H(compile_step)),
B('chromedriver-fyi-tests-dbg', H(std_test_steps),
T(['chromedriver'], ['--install=ChromiumTestShell'])),
B('fyi-x86-builder-dbg',
H(compile_step + std_host_tests, experimental, target_arch='x86')),
B('fyi-builder-dbg',
H(std_build_steps + std_host_tests, experimental,
extra_gyp='emma_coverage=1')),
B('x86-builder-dbg',
H(compile_step + std_host_tests, target_arch='x86')),
B('fyi-builder-rel', H(std_build_steps, experimental)),
B('fyi-tests', H(std_test_steps),
T(std_tests, ['--experimental', flakiness_server,
'--coverage-bucket', CHROMIUM_COVERAGE_BUCKET])),
B('fyi-component-builder-tests-dbg',
H(compile_step, extra_gyp='component=shared_library'),
T(std_tests, ['--experimental', flakiness_server])),
B('gpu-builder-tests-dbg', H(compile_step), T(['gpu'])),
B('perf-bisect-builder-tests-dbg', H(['bisect_perf_regression'])),
B('perf-tests-rel', H(std_test_steps),
T([], ['--install=ChromiumTestShell'])),
B('webkit-latest-webkit-tests', H(std_test_steps),
T(['webkit_layout', 'webkit'], ['--auto-reconnect'])),
B('webkit-latest-contentshell', H(compile_step),
T(['webkit_layout'], ['--auto-reconnect'])),
B('builder-unit-tests', H(compile_step), T(['unit'])),
B('webrtc-builder',
H(std_build_steps,
extra_args=['--build-targets=android_builder_webrtc'],
extra_gyp='include_tests=1 enable_tracing=1')),
B('webrtc-tests', H(['download_webrtc_resources'] + std_test_steps),
T(['webrtc'], [flakiness_server])),
# Generic builder config (for substring match).
B('builder', H(std_build_steps)),
]
bot_map = dict((config.bot_id, config) for config in bot_configs)
# These bots have identical configuration to ones defined earlier.
copy_map = [
('lkgr-clobber', 'main-clobber'),
('try-builder-dbg', 'main-builder-dbg'),
('try-builder-rel', 'main-builder-rel'),
('try-clang-builder', 'main-clang-builder'),
('try-fyi-builder-dbg', 'fyi-builder-dbg'),
('try-x86-builder-dbg', 'x86-builder-dbg'),
('try-tests', 'main-tests'),
('try-fyi-tests', 'fyi-tests'),
('webkit-latest-tests', 'main-tests'),
]
for to_id, from_id in copy_map:
assert to_id not in bot_map
# pylint: disable=W0212
bot_map[to_id] = copy.deepcopy(bot_map[from_id])._replace(bot_id=to_id)
# Trybots do not upload to flakiness dashboard. They should be otherwise
# identical in configuration to their trunk building counterparts.
test_obj = bot_map[to_id].test_obj
if to_id.startswith('try') and test_obj:
extra_args = test_obj.extra_args
if extra_args and flakiness_server in extra_args:
extra_args.remove(flakiness_server)
return bot_map
# Return an object from the map, looking first for an exact id match.
# If this fails, look for an id which is a substring of the specified id.
# Choose the longest of all substring matches.
# pylint: disable=W0622
def GetBestMatch(id_map, id):
config = id_map.get(id)
if not config:
substring_matches = filter(lambda x: x in id, id_map.iterkeys())
if substring_matches:
max_id = max(substring_matches, key=len)
print 'Using config from id="%s" (substring match).' % max_id
config = id_map[max_id]
return config
def GetRunBotOptParser():
parser = bb_utils.GetParser()
parser.add_option('--bot-id', help='Specify bot id directly.')
parser.add_option('--testing', action='store_true',
help='For testing: print, but do not run commands')
return parser
def GetBotConfig(options, bot_step_map):
bot_id = options.bot_id or options.factory_properties.get('android_bot_id')
if not bot_id:
    print >> sys.stderr, (
        'A bot id must be specified through option or factory_props.')
return
bot_config = GetBestMatch(bot_step_map, bot_id)
if not bot_config:
print 'Error: config for id="%s" cannot be inferred.' % bot_id
return bot_config
def RunBotCommands(options, commands, env):
print 'Environment changes:'
print DictDiff(dict(os.environ), env)
for command in commands:
print bb_utils.CommandToString(command)
sys.stdout.flush()
if options.testing:
env['BUILDBOT_TESTING'] = '1'
return_code = subprocess.call(command, cwd=bb_utils.CHROME_SRC, env=env)
if return_code != 0:
return return_code
def main(argv):
parser = GetRunBotOptParser()
options, args = parser.parse_args(argv[1:])
if args:
parser.error('Unused args: %s' % args)
bot_config = GetBotConfig(options, GetBotStepMap())
if not bot_config:
sys.exit(1)
print 'Using config:', bot_config
commands = GetCommands(options, bot_config)
for command in commands:
print 'Will run: ', bb_utils.CommandToString(command)
print
env = GetEnvironment(bot_config.host_obj, options.testing)
return RunBotCommands(options, commands, env)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| {
"content_hash": "fdf37349ca0af342bf80d942fc205e43",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 79,
"avg_line_length": 35.086021505376344,
"alnum_prop": 0.6426601287159056,
"repo_name": "mogoweb/chromium-crosswalk",
"id": "dbe453a8f03c03cd809fe8dcf1861e1f11a648c0",
"size": "9980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/android/buildbot/bb_run_bot.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "54831"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40940503"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "182703853"
},
{
"name": "CSS",
"bytes": "799795"
},
{
"name": "DOT",
"bytes": "1873"
},
{
"name": "Java",
"bytes": "4807735"
},
{
"name": "JavaScript",
"bytes": "20714038"
},
{
"name": "Mercury",
"bytes": "10299"
},
{
"name": "Objective-C",
"bytes": "985558"
},
{
"name": "Objective-C++",
"bytes": "6205987"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1213389"
},
{
"name": "Python",
"bytes": "9735121"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1305641"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
} |
"""
pyiters
"""
from copy import copy
from itertools import dropwhile
__all__ = [
"ndropwhile",
"iproduct",
"callfor",
]
def ndropwhile(predicate, iterable):
return dropwhile(lambda x: not predicate(x), iterable)
def _iproduct(*iterables):
if len(iterables) == 1:
for i in copy(iterables[0]):
yield [i]
else:
for i in copy(iterables[0]):
for j in _iproduct(*iterables[1:]):
yield [i] + j
def iproduct(*iterables, repeat=1):
if repeat > 1:
iterables = [i for _ in range(repeat) for i in iterables]
for i in _iproduct(*iterables):
yield tuple(i)
def callfor(func, iterable, *args, **kwargs):
for i in iterable:
func(i, *args, **kwargs)
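# Illustrative usage sketch (not part of the original module); the expected
# values follow directly from the definitions above.
if __name__ == "__main__":
    # iproduct: cartesian product over (copies of) its input iterables
    assert list(iproduct([0, 1], repeat=2)) == [(0, 0), (0, 1), (1, 0), (1, 1)]
    # ndropwhile: drop leading items until the predicate first becomes true
    assert list(ndropwhile(lambda x: x > 2, [1, 2, 5, 1])) == [5, 1]
    # callfor: call func once per item, forwarding any extra arguments
    callfor(print, ["a", "b"], end="!\n")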
| {
"content_hash": "e832caec32fe19aaab6dc55dcef7f728",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 65,
"avg_line_length": 20.105263157894736,
"alnum_prop": 0.5785340314136126,
"repo_name": "odd-even/pyiters",
"id": "2c4123e664ae2a70667d5647b5c022c8988c3026",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyiters/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "788"
}
],
"symlink_target": ""
} |
from facette.utils import *
import json
GROUP_ENTRY_ORIGIN = "origin"
GROUP_ENTRY_PATTERN = "pattern"
class GroupEntry:
def __init__(self, js=""):
self.entry = {}
self.origin = facette_to_json(GROUP_ENTRY_ORIGIN, js, self.entry)
self.pattern = facette_to_json(GROUP_ENTRY_PATTERN, js, self.entry)
def set(self, origin=None, pattern=None):
        self.origin = facette_set(origin, GROUP_ENTRY_ORIGIN, self.entry)
        self.pattern = facette_set(pattern, GROUP_ENTRY_PATTERN, self.entry)
def __str__(self):
return json.dumps(self.entry)
def __repr__(self):
return str(self)
| {
"content_hash": "8e5e551d82c814e072a37d298c46b60d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 30.142857142857142,
"alnum_prop": 0.6350710900473934,
"repo_name": "OpenTouch/python-facette",
"id": "6f9dca666f12eef11400581157c6e9ed398d43ad",
"size": "1278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/facette/v1/groupentry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55694"
}
],
"symlink_target": ""
} |
import accounts
import praw
import sys
import datetime
import random
username = accounts.amumu_bot_user
password = accounts.amumu_bot_pass
subreddits = 'redditdebuggingspace'
r = praw.Reddit('Amumu Bot by /u/Xwerve and /u/liquidized')
def initialize():
r.login(username, password)
initialize()
comment_ids = []
post_ids = []
scanned_ids = []
##### Triggers and Responses of Amumu_bot in lower case #######
pre_responses = ["Let me give you a hug.", "Let's be friends forever.", "Would you like to be friends?", "Come play with me."]
sad_responses = ["I sense that you are sad. "+response for response in pre_responses]
yes_responses = ["I thought you'd never pick me. (づ。◕‿‿◕。)づⒽⓤⒼ♥"]
no_responses = ["Awww. :(", "Hey, come back! ;___;"]
yes_triggers = ["ok", "yes", "yeah", "yea", "okay", "yep", "let's be friends", "yee", "i love you", " ya ", " ye ", "sure", "all right", "alright", "i want to be your friend", "hug me", "i love hugs", "<3", "i want a hug", "give me a hug", "come here", "/hug", "*hug"]
no_triggers = ["no ", "no, " "nope", "naw ", "sorry", "don't want you", "go away", "fuck off", "one wants to play with you"]
sad_triggers = ["i'm depressed", "i am depressed", "i'm sad", ":'(", "i am lonely",
"i'm lonely", "i have no friends", "i want friends", "i'm crying", " :'(", "sad amumu", ";_;", " )':", ";__;", "i need a hug"]
########################################################################################
def reply_2_comments(comment, triggers, responses):
commenttext = comment.body.lower()
has_trigger = any(trigger in commenttext for trigger in triggers)
if has_trigger and str(comment.author) != username and comment.id not in comment_ids:
comment_ids.append(comment.id)
reply0 = random.choice(responses)
comment.reply(reply0)
current_time = datetime.datetime.now().time()
print('Bot replied to comment', comment, 'posted by', comment.author, 'at', current_time.isoformat())
return True
else:
return False
def has_replied(comment): #check if the scanned comment is a response to an Amumu_bot's comment and respond to it accordingly.
parent_id = comment.parent_id
parent_comment = r.get_info(None, parent_id, None)
if type(parent_comment) == praw.objects.Comment: #in case parent_comment is not a Comment object.
commenttext = parent_comment.body
has_trigger = any(trigger in commenttext for trigger in sad_responses)
if str(parent_comment.author).lower() == username.lower() and has_trigger:
reply_2_comments(comment, yes_triggers, yes_responses) or reply_2_comments(comment, no_triggers, no_responses)
else:
reply_2_comments(comment, sad_triggers, sad_responses)
else:
reply_2_comments(comment, sad_triggers, sad_responses)
### main ###
for raw_comment in praw.helpers.comment_stream(r, subreddits, limit=25, verbosity=0): #go through a stream of new comments in subreddit.
if len(comment_ids) > 200:
comment_ids.pop(0)
if len(scanned_ids) > 300:
scanned_ids.pop(0)
if raw_comment.id not in scanned_ids:
scanned_ids.append(raw_comment.id)
has_replied(raw_comment)
| {
"content_hash": "6f11d2f3d2000457e10407faedd9e8e4",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 268,
"avg_line_length": 44.970588235294116,
"alnum_prop": 0.6654676258992805,
"repo_name": "taemintkim/Amumu_bot",
"id": "3ea30a68945c8f4321d2d6fbbb27cdec0699e0ba",
"size": "3082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amumu_bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3082"
}
],
"symlink_target": ""
} |
import os
import sys
import logging
from optparse import OptionParser
sys.path.append(os.sep.join(
os.path.dirname(os.path.abspath(__file__)).split(os.sep)[:-1]))
import libcloud_rest.log
from libcloud_rest.log import get_logger
from libcloud_rest.constants import VALID_LOG_LEVELS
DEBUG = False
def start_server(host, port, logger, debug):
from werkzeug.serving import run_simple
from libcloud_rest.application import LibcloudRestApp
app = LibcloudRestApp()
logger.info('Debug HTTP server listening on %s:%s' % (host, port))
run_simple(host, port, app,
use_debugger=True, use_reloader=True)
def setup_logger(log_level, log_file):
# Mute default werkzeug logger
werkzeug_logger = logging.getLogger('werkzeug')
werkzeug_logger.setLevel(logging.ERROR)
# Setup main logger
if not log_file:
handler = logging.StreamHandler()
else:
handler = logging.FileHandler(filename=log_file)
new_logger = get_logger(handler=handler, level=log_level)
libcloud_rest.log.logger = new_logger
return new_logger
def main():
usage = 'usage: %prog'
parser = OptionParser(usage=usage)
parser.add_option('--host', dest='host', default='localhost',
help='Host to bind to', metavar='HOST')
parser.add_option('--port', dest='port', default=5000,
help='Port to listen on', metavar='PORT')
parser.add_option('--log-level', dest='log_level', default='info',
help='Log level', metavar='LEVEL')
parser.add_option('--log-file', dest='log_file', default=None,
help='Log file path. If not provided'
' logs will go to stdout',
metavar='PATH')
parser.add_option('--debug', dest='debug', default=False,
action='store_true', help='Enable debug mode')
(options, args) = parser.parse_args()
log_level = options.log_level.upper()
log_file = options.log_file
if log_level not in VALID_LOG_LEVELS:
valid_levels = [value.lower() for value in VALID_LOG_LEVELS]
raise ValueError('Invalid log level: %s. Valid log levels are: %s' %
(options.log_level, ', '.join(valid_levels)))
if options.debug:
log_level = 'DEBUG'
global DEBUG
DEBUG = True
level = getattr(logging, log_level, logging.INFO)
logger = setup_logger(log_level=level, log_file=log_file)
start_server(host=options.host, port=int(options.port),
logger=logger, debug=options.debug)
if __name__ == '__main__':
main()
| {
"content_hash": "11143f7566c1e90c61ac03473c8ae004",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 76,
"avg_line_length": 32.23170731707317,
"alnum_prop": 0.6250472947408248,
"repo_name": "islamgulov/libcloud.rest",
"id": "77a153f4de550a2d57df2f83b401df46d9893e13",
"size": "2666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libcloud_rest/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "612"
},
{
"name": "Python",
"bytes": "199493"
}
],
"symlink_target": ""
} |
class PublicKeyPinsAnalyzer:
def __init__(self):
pass
def analyze(self, parse_results, headers, content):
results = parse_results["Public-Key-Pins"]
parts = []
if results["status"] == "PUBLIC_KEY_PINS_NONE":
parts.append({
"type" : "warning",
"message" : "Public-Key-Pins is not specified. If an attacker can compromise a CA, he could generate certificate that will be accepted by your user."
})
if results["status"] == "PUBLIC_KEY_PINS_REPORT_ONLY":
parts.append({
"type" : "warning",
"message" : "Public-Key-Pins is activated in 'Report-Only' mode. Certificate that are not specified in this header will be accepted, but you will receive notification of it."
})
if results["status"] == "PUBLIC_KEY_PINS_REPORT_DEFINED":
            # 15 day threshold
if results["max-age"] > 15 * 24 * 60 * 60:
parts.append({
"type" : "info",
"message" : "Public-Key-Pins is activated, but it has a really long max-age value. Having long max-age value hinders revocation if one of the certification is compromised."
})
            # 15 minute threshold
if results["max-age"] < 15 * 60:
parts.append({
"type" : "warning",
"message" : "Public-Key-Pins is activated, but it has a really short max-age value. Having short max-age value can nullify the effect of the header if the information is expired every time the user visits the website."
})
if not results["status"] == "PUBLIC_KEY_PINS_NONE":
if results["report-uri"] is None:
parts.append({
"type" : "info",
"message" : "No 'report-uri' is configured for the header Public-Key-Pins. You won't be notified of certificate rejected by browser."
})
return "Public-Key-Pins Header", parts | {
"content_hash": "35686b2c88a9f5a76b0840b45081df50",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 223,
"avg_line_length": 39.22727272727273,
"alnum_prop": 0.6726535341830823,
"repo_name": "HoLyVieR/http-security-headers",
"id": "77551bdb7bf08919fc39d3d3100d4b85a611295c",
"size": "1726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyzer/PublicKeyPinsAnalyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33357"
}
],
"symlink_target": ""
} |
'''
Bubble
======
.. versionadded:: 1.1.0
.. image:: images/bubble.jpg
:align: right
The Bubble widget is a form of menu or a small popup where the menu options
are stacked either vertically or horizontally.
The :class:`Bubble` contains an arrow pointing in the direction you
choose.
Simple example
--------------
.. include:: ../../examples/widgets/bubble_test.py
:literal:
Customize the Bubble
--------------------
You can choose the direction in which the arrow points::
Bubble(arrow_pos='top_mid')
The widgets added to the Bubble are ordered horizontally by default, like a
Boxlayout. You can change that by::
orientation = 'vertical'
To add items to the bubble::
bubble = Bubble(orientation = 'vertical')
bubble.add_widget(your_widget_instance)
To remove items::
bubble.remove_widget(widget)
or
bubble.clear_widgets()
To access the list of children, use content.children::
bubble.content.children
.. warning::
This is important! Do not use bubble.children
To change the appearance of the bubble::
bubble.background_color = (1, 0, 0, .5) #50% translucent red
bubble.border = [0, 0, 0, 0]
background_image = 'path/to/background/image'
arrow_image = 'path/to/arrow/image'
'''
__all__ = ('Bubble', 'BubbleButton', 'BubbleContent')
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.scatter import Scatter
from kivy.uix.gridlayout import GridLayout
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \
ListProperty, BooleanProperty
from kivy.clock import Clock
from kivy.base import EventLoop
class BubbleButton(Button):
'''A button intended for use in a Bubble widget.
You can use a "normal" button class, but it will not look good unless
the background is changed.
Rather use this BubbleButton widget that is already defined and provides a
suitable background for you.
'''
pass
class BubbleContent(GridLayout):
pass
class Bubble(GridLayout):
'''Bubble class. See module documentation for more information.
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a).
:data:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
border = ListProperty([16, 16, 16, 16])
'''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage`
graphics instruction. Used with the :data:`background_image`.
It should be used when using custom backgrounds.
It must be a list of 4 values: (top, right, bottom, left). Read the
BorderImage instructions for more information about how to use it.
:data:`border` is a :class:`~kivy.properties.ListProperty` and defaults to
(16, 16, 16, 16)
'''
background_image = StringProperty(
'atlas://data/images/defaulttheme/bubble')
'''Background image of the bubble.
:data:`background_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble'.
'''
arrow_image = StringProperty(
'atlas://data/images/defaulttheme/bubble_arrow')
''' Image of the arrow pointing to the bubble.
:data:`arrow_image` is a :class:`~kivy.properties.StringProperty` and
defaults to 'atlas://data/images/defaulttheme/bubble_arrow'.
'''
show_arrow = BooleanProperty(True)
''' Indicates whether to show arrow.
.. versionadded:: 1.8.0
:data:`show_arrow` is a :class:`~kivy.properties.BooleanProperty` and
defaults to `True`.
'''
arrow_pos = OptionProperty('bottom_mid', options=(
'left_top', 'left_mid', 'left_bottom', 'top_left', 'top_mid',
'top_right', 'right_top', 'right_mid', 'right_bottom',
'bottom_left', 'bottom_mid', 'bottom_right'))
'''Specifies the position of the arrow relative to the bubble.
Can be one of: left_top, left_mid, left_bottom top_left, top_mid, top_right
right_top, right_mid, right_bottom bottom_left, bottom_mid, bottom_right.
:data:`arrow_pos` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'bottom_mid'.
'''
content = ObjectProperty(None)
'''This is the object where the main content of the bubble is held.
:data:`content` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
'''
orientation = OptionProperty('horizontal',
options=('horizontal', 'vertical'))
'''This specifies the manner in which the children inside bubble
are arranged. Can be one of 'vertical' or 'horizontal'.
:data:`orientation` is a :class:`~kivy.properties.OptionProperty` and
defaults to 'horizontal'.
'''
limit_to = ObjectProperty(None, allow_none=True)
    '''Specifies the widget to which the bubble's position is restricted.
.. versionadded:: 1.6.0
:data:`limit_to` is a :class:`~kivy.properties.ObjectProperty` and
defaults to 'None'.
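    For example (a sketch; EventLoop.window is the special value handled in
    on_pos below), to keep the bubble inside the application window::
        from kivy.base import EventLoop
        bubble.limit_to = EventLoop.window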
'''
def __init__(self, **kwargs):
self._prev_arrow_pos = None
self._arrow_layout = BoxLayout()
self._bk_img = Image(
source=self.background_image, allow_stretch=True,
keep_ratio=False, color=self.background_color)
self.background_texture = self._bk_img.texture
self._arrow_img = Image(source=self.arrow_image,
color=self.background_color)
self.content = content = BubbleContent(parent=self)
super(Bubble, self).__init__(**kwargs)
content.parent = None
self.add_widget(content)
self.on_arrow_pos()
def add_widget(self, *l):
content = self.content
if content is None:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).add_widget(*l)
else:
content.add_widget(*l)
def remove_widget(self, *l):
content = self.content
if not content:
return
if l[0] == content or l[0] == self._arrow_img\
or l[0] == self._arrow_layout:
super(Bubble, self).remove_widget(*l)
else:
content.remove_widget(l[0])
def clear_widgets(self, **kwargs):
content = self.content
if not content:
return
if kwargs.get('do_super', False):
super(Bubble, self).clear_widgets()
else:
content.clear_widgets()
def on_show_arrow(self, instance, value):
self._arrow_img.opacity = int(value)
def on_parent(self, instance, value):
Clock.schedule_once(self._update_arrow)
def on_pos(self, instance, pos):
lt = self.limit_to
if lt and lt is not object:
self.limit_to = object
if lt is EventLoop.window:
lt.x = lt.y = 0
lt.top = EventLoop.window.height
lt.right = EventLoop.window.width
self.x = max(self.x, lt.x)
self.right = min(self.right, lt.right)
self.top = min(self.top, lt.top)
self.y = max(self.y, lt.y)
self.limit_to = lt
def on_background_image(self, *l):
self._bk_img.source = self.background_image
def on_background_color(self, *l):
if self.content is None:
return
self._arrow_img.color = self._bk_img.color = self.background_color
def on_orientation(self, *l):
content = self.content
if not content:
return
if self.orientation[0] == 'v':
content.cols = 1
content.rows = 99
else:
content.cols = 99
content.rows = 1
def on_arrow_image(self, *l):
self._arrow_img.source = self.arrow_image
def on_arrow_pos(self, *l):
self_content = self.content
if not self_content:
Clock.schedule_once(self.on_arrow_pos)
return
if self_content not in self.children:
Clock.schedule_once(self.on_arrow_pos)
return
self_arrow_pos = self.arrow_pos
if self._prev_arrow_pos == self_arrow_pos:
return
self._prev_arrow_pos = self_arrow_pos
self_arrow_layout = self._arrow_layout
self_arrow_layout.clear_widgets()
self_arrow_img = self._arrow_img
self._sctr = self._arrow_img
self.clear_widgets(do_super=True)
self_content.parent = None
self_arrow_img.size_hint = (1, None)
self_arrow_img.height = self_arrow_img.texture_size[1]
self_arrow_img.pos = 0, 0
widget_list = []
arrow_list = []
parent = self_arrow_img.parent
if parent:
parent.remove_widget(self_arrow_img)
if self_arrow_pos[0] == 'b' or self_arrow_pos[0] == 't':
self.cols = 1
self.rows = 3
self_arrow_layout.orientation = 'horizontal'
self_arrow_img.width = self.width / 3
self_arrow_layout.size_hint = (1, None)
self_arrow_layout.height = self_arrow_img.height
if self_arrow_pos[0] == 'b':
if self_arrow_pos == 'bottom_mid':
widget_list = (self_content, self_arrow_img)
else:
if self_arrow_pos == 'bottom_left':
arrow_list = (self_arrow_img, Widget(), Widget())
elif self_arrow_pos == 'bottom_right':
#add two dummy widgets
arrow_list = (Widget(), Widget(), self_arrow_img)
widget_list = (self_content, self_arrow_layout)
else:
sctr = Scatter(do_translation=False,
rotation=180,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=self_arrow_img.size)
sctr.add_widget(self_arrow_img)
if self_arrow_pos == 'top_mid':
#add two dummy widgets
arrow_list = (Widget(), sctr, Widget())
elif self_arrow_pos == 'top_left':
arrow_list = (sctr, Widget(), Widget())
elif self_arrow_pos == 'top_right':
arrow_list = (Widget(), Widget(), sctr)
widget_list = (self_arrow_layout, self_content)
elif self_arrow_pos[0] == 'l' or self_arrow_pos[0] == 'r':
self.cols = 3
self.rows = 1
self_arrow_img.width = self.height / 3
self_arrow_layout.orientation = 'vertical'
self_arrow_layout.cols = 1
self_arrow_layout.size_hint = (None, 1)
self_arrow_layout.width = self_arrow_img.height
rotation = -90 if self_arrow_pos[0] == 'l' else 90
self._sctr = sctr = Scatter(do_translation=False,
rotation=rotation,
do_rotation=False,
do_scale=False,
size_hint=(None, None),
size=(self_arrow_img.size))
sctr.add_widget(self_arrow_img)
if self_arrow_pos[-4:] == '_top':
arrow_list = (Widget(size_hint=(1, .07)),
sctr, Widget(size_hint=(1, .3)))
elif self_arrow_pos[-4:] == '_mid':
arrow_list = (Widget(), sctr, Widget())
Clock.schedule_once(self._update_arrow)
elif self_arrow_pos[-7:] == '_bottom':
arrow_list = (Widget(), Widget(), sctr)
if self_arrow_pos[0] == 'l':
widget_list = (self_arrow_layout, self_content)
else:
widget_list = (self_content, self_arrow_layout)
# add widgets to arrow_layout
add = self_arrow_layout.add_widget
for widg in arrow_list:
add(widg)
# add widgets to self
add = self.add_widget
for widg in widget_list:
add(widg)
def _update_arrow(self, *dt):
if self.arrow_pos in ('left_mid', 'right_mid'):
self._sctr.center_y = self._arrow_layout.center_y
| {
"content_hash": "faec101943d3f2821daed434ff5f7ee4",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 79,
"avg_line_length": 34.10382513661202,
"alnum_prop": 0.573946482935427,
"repo_name": "Davideddu/kivy-forkedtouch",
"id": "edfdfc6e3444bed6e31e0f14b7eb3f1d778cc047",
"size": "12482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy/uix/bubble.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158959"
},
{
"name": "CSS",
"bytes": "6827"
},
{
"name": "Emacs Lisp",
"bytes": "9603"
},
{
"name": "F#",
"bytes": "289"
},
{
"name": "JavaScript",
"bytes": "11300"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "2952315"
},
{
"name": "Shell",
"bytes": "6236"
},
{
"name": "TeX",
"bytes": "4271"
},
{
"name": "VimL",
"bytes": "1123"
}
],
"symlink_target": ""
} |
from ems.xtype.base import NumberType, StringType, BoolType, UnitType, DictType
from ems.xtype.base import SequenceType
nameType = StringType()
nameType.minLength=1
nameType.maxLength=12
ageType = UnitType('Jahre', int)
ageType.minValue = 0
ageType.maxValue = 140
ageType.value2UnitSpace = 1
weightType = UnitType('kg', float)
weightType.minValue = 1
weightType.maxValue = 300
weightType.value2UnitSpace = 1
weightType.decimalsCount = 1
weightType.thousandsSeparator = '.'
weightType.decimalsSeparator = ','
moneyType = UnitType('$', float)
moneyType.minValue = 100.0
moneyType.maxValue = 15000.0
moneyType.value2UnitSpace = 1
moneyType.decimalsCount = 2
moneyType.thousandsSeparator = '.'
moneyType.decimalsSeparator = ','
marriedType = BoolType()
itemType = DictType()
itemType.addKey('forename', nameType)
itemType.addKey('surname', nameType)
itemType.addKey('age', ageType)
itemType.addKey('weight', weightType)
itemType.addKey('income', moneyType)
itemType.addKey('married', marriedType)
itemType.maxLength = 8
itemType.minLength = 1
listType = SequenceType(itemType)
testData = [{'forename':'Michael','surname':'Tils','age':4,'weight':104.9,'income': 850.0, 'married':False},
{'forename':'Carol','surname':'Sample','age':31,'weight':68.9,'income':1450.0,'married':False},
{'forename':'Thorsten','surname':'Real','age':29,'weight':72.9,'income':2850.0,'married':False},
{'forename':'Christine','surname':'Clinton','age':28,'weight':65.9,'income':450.0,'married':True},
            {'forename':'Sponge','surname':'Bob','age':29,'weight':79.6,'income':3850.0,'married':False}]
| {
"content_hash": "bd7514f47c0a79606a72b9424507ea2b",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 110,
"avg_line_length": 33.16326530612245,
"alnum_prop": 0.7193846153846154,
"repo_name": "mtils/ems",
"id": "27141a7f9d0d350d2caae14de61f3e8f05ed84ec",
"size": "1626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/xtype/persondata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3631"
},
{
"name": "Python",
"bytes": "1924893"
},
{
"name": "QML",
"bytes": "16755"
}
],
"symlink_target": ""
} |
from six.moves.urllib.parse import quote
import os
import time
import functools
import inspect
import itertools
import operator
from copy import deepcopy
from sys import exc_info
from swift import gettext_ as _
from eventlet import sleep
from eventlet.timeout import Timeout
import six
from swift.common.wsgi import make_pre_authed_env
from swift.common.utils import Timestamp, config_true_value, \
public, split_path, list_from_csv, GreenthreadSafeIterator, \
GreenAsyncPile, quorum_size, parse_content_type, \
document_iters_to_http_response_body
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \
ConnectionTimeout, RangeAlreadyComplete
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.http import is_informational, is_success, is_redirection, \
is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE, HTTP_GONE
from swift.common.swob import Request, Response, Range, \
HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \
status_map
from swift.common.request_helpers import strip_sys_meta_prefix, \
strip_user_meta_prefix, is_user_meta, is_sys_meta, is_sys_or_user_meta, \
http_response_to_document_iters, is_object_transient_sysmeta, \
strip_object_transient_sysmeta_prefix
from swift.common.storage_policy import POLICIES
DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60 # seconds
DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60 # seconds
def update_headers(response, headers):
"""
Helper function to update headers in the response.
:param response: swob.Response object
:param headers: dictionary headers
"""
if hasattr(headers, 'items'):
headers = headers.items()
for name, value in headers:
if name == 'etag':
response.headers[name] = value.replace('"', '')
elif name not in ('date', 'content-length', 'content-type',
'connection', 'x-put-timestamp', 'x-delete-after'):
response.headers[name] = value
def source_key(resp):
"""
Provide the timestamp of the swift http response as a floating
point value. Used as a sort key.
:param resp: bufferedhttp response object
"""
return Timestamp(resp.getheader('x-backend-timestamp') or
resp.getheader('x-put-timestamp') or
resp.getheader('x-timestamp') or 0)
def delay_denial(func):
"""
Decorator to declare which methods should have any swift.authorize call
delayed. This is so the method can load the Request object up with
additional information that may be needed by the authorization system.
:param func: function for which authorization will be delayed
"""
func.delay_denial = True
return func
def _prep_headers_to_info(headers, server_type):
"""
Helper method that iterates once over a dict of headers,
converting all keys to lower case and separating
into subsets containing user metadata, system metadata
and other headers.
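    For example (illustrative values), with server_type 'object' the headers
    {'X-Object-Meta-Color': 'blue', 'X-Object-Sysmeta-Foo': '1', 'Etag': 'x'}
    would yield other={'etag': 'x'}, meta={'color': 'blue'} and
    sysmeta={'foo': '1'}.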
"""
meta = {}
sysmeta = {}
other = {}
for key, val in dict(headers).items():
lkey = key.lower()
if is_user_meta(server_type, lkey):
meta[strip_user_meta_prefix(server_type, lkey)] = val
elif is_sys_meta(server_type, lkey):
sysmeta[strip_sys_meta_prefix(server_type, lkey)] = val
else:
other[lkey] = val
return other, meta, sysmeta
def headers_to_account_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of account info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'account')
account_info = {
'status': status_int,
# 'container_count' anomaly:
# Previous code sometimes expects an int sometimes a string
# Current code aligns to str and None, yet translates to int in
# deprecated functions as needed
'container_count': headers.get('x-account-container-count'),
'total_object_count': headers.get('x-account-object-count'),
'bytes': headers.get('x-account-bytes-used'),
'meta': meta,
'sysmeta': sysmeta,
}
if is_success(status_int):
account_info['account_really_exists'] = not config_true_value(
headers.get('x-backend-fake-account-listing'))
return account_info
def headers_to_container_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of container info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'container')
return {
'status': status_int,
'read_acl': headers.get('x-container-read'),
'write_acl': headers.get('x-container-write'),
'sync_key': headers.get('x-container-sync-key'),
'object_count': headers.get('x-container-object-count'),
'bytes': headers.get('x-container-bytes-used'),
'versions': headers.get('x-versions-location'),
'storage_policy': headers.get('x-backend-storage-policy-index', '0'),
'cors': {
'allow_origin': meta.get('access-control-allow-origin'),
'expose_headers': meta.get('access-control-expose-headers'),
'max_age': meta.get('access-control-max-age')
},
'meta': meta,
'sysmeta': sysmeta,
}
def headers_to_object_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of object info based on response headers.
"""
headers, meta, sysmeta = _prep_headers_to_info(headers, 'object')
transient_sysmeta = {}
for key, val in headers.iteritems():
if is_object_transient_sysmeta(key):
key = strip_object_transient_sysmeta_prefix(key.lower())
transient_sysmeta[key] = val
info = {'status': status_int,
'length': headers.get('content-length'),
'type': headers.get('content-type'),
'etag': headers.get('etag'),
'meta': meta,
'sysmeta': sysmeta,
'transient_sysmeta': transient_sysmeta
}
return info
def cors_validation(func):
"""
Decorator to check if the request is a CORS request and if so, if it's
valid.
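    Typically stacked on proxy controller verb methods, for example
    (sketch)::
        @public
        @cors_validation
        @delay_denial
        def GET(self, req):
            ...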
:param func: function to check
"""
@functools.wraps(func)
def wrapped(*a, **kw):
controller = a[0]
req = a[1]
# The logic here was interpreted from
# http://www.w3.org/TR/cors/#resource-requests
# Is this a CORS request?
req_origin = req.headers.get('Origin', None)
if req_origin:
# Yes, this is a CORS request so test if the origin is allowed
container_info = \
controller.container_info(controller.account_name,
controller.container_name, req)
cors_info = container_info.get('cors', {})
# Call through to the decorated method
resp = func(*a, **kw)
if controller.app.strict_cors_mode and \
not controller.is_origin_allowed(cors_info, req_origin):
return resp
# Expose,
# - simple response headers,
# http://www.w3.org/TR/cors/#simple-response-header
# - swift specific: etag, x-timestamp, x-trans-id
# - user metadata headers
# - headers provided by the user in
# x-container-meta-access-control-expose-headers
if 'Access-Control-Expose-Headers' not in resp.headers:
expose_headers = set([
'cache-control', 'content-language', 'content-type',
'expires', 'last-modified', 'pragma', 'etag',
'x-timestamp', 'x-trans-id', 'x-openstack-request-id'])
for header in resp.headers:
if header.startswith('X-Container-Meta') or \
header.startswith('X-Object-Meta'):
expose_headers.add(header.lower())
if cors_info.get('expose_headers'):
expose_headers = expose_headers.union(
[header_line.strip().lower()
for header_line in
cors_info['expose_headers'].split(' ')
if header_line.strip()])
resp.headers['Access-Control-Expose-Headers'] = \
', '.join(expose_headers)
# The user agent won't process the response if the Allow-Origin
# header isn't included
if 'Access-Control-Allow-Origin' not in resp.headers:
if cors_info['allow_origin'] and \
cors_info['allow_origin'].strip() == '*':
resp.headers['Access-Control-Allow-Origin'] = '*'
else:
resp.headers['Access-Control-Allow-Origin'] = req_origin
return resp
else:
# Not a CORS request so make the call as normal
return func(*a, **kw)
return wrapped
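# Typical usage (illustrative sketch, not taken from this file): controller
# verbs are wrapped with this decorator so their responses pick up the CORS
# headers computed above, e.g.
#
#     @public
#     @cors_validation
#     def GET(self, req):
#         return self.GETorHEAD(req)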
def get_object_info(env, app, path=None, swift_source=None):
"""
Get the info structure for an object, based on env and app.
This is useful to middlewares.
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the object.
"""
(version, account, container, obj) = \
split_path(path or env['PATH_INFO'], 4, 4, True)
info = _get_object_info(app, env, account, container, obj,
swift_source=swift_source)
if info:
info = deepcopy(info)
else:
info = headers_to_object_info({}, 0)
for field in ('length',):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
return info
def get_container_info(env, app, swift_source=None):
"""
Get the info structure for a container, based on env and app.
This is useful to middlewares.
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the container.
"""
(version, account, container, unused) = \
split_path(env['PATH_INFO'], 3, 4, True)
# Check in environment cache and in memcache (in that order)
info = _get_info_from_caches(app, env, account, container)
if not info:
# Cache miss; go HEAD the container and populate the caches
env.setdefault('swift.infocache', {})
# Before checking the container, make sure the account exists.
#
# If it is an autocreateable account, just assume it exists; don't
# HEAD the account, as a GET or HEAD response for an autocreateable
# account is successful whether the account actually has .db files
# on disk or not.
is_autocreate_account = account.startswith(
getattr(app, 'auto_create_account_prefix', '.'))
if not is_autocreate_account:
account_info = get_account_info(env, app, swift_source)
if not account_info or not is_success(account_info['status']):
return headers_to_container_info({}, 0)
req = _prepare_pre_auth_info_request(
env, ("/%s/%s/%s" % (version, account, container)),
(swift_source or 'GET_CONTAINER_INFO'))
resp = req.get_response(app)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
# See similar comment in get_account_info() for justification.
info = _get_info_from_infocache(env, account, container)
if info is None:
info = set_info_cache(app, env, account, container, resp)
if info:
info = deepcopy(info) # avoid mutating what's in swift.infocache
else:
info = headers_to_container_info({}, 0)
# Old data format in memcache immediately after a Swift upgrade; clean
# it up so consumers of get_container_info() aren't exposed to it.
if 'object_count' not in info and 'container_size' in info:
info['object_count'] = info.pop('container_size')
for field in ('storage_policy', 'bytes', 'object_count'):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
return info
def get_account_info(env, app, swift_source=None):
"""
Get the info structure for an account, based on env and app.
This is useful to middlewares.
.. note::
This call bypasses auth. Success does not imply that the request has
authorization to the account.
:raises ValueError: when path doesn't contain an account
"""
(version, account, _junk, _junk) = \
split_path(env['PATH_INFO'], 2, 4, True)
# Check in environment cache and in memcache (in that order)
info = _get_info_from_caches(app, env, account)
# Cache miss; go HEAD the account and populate the caches
if not info:
env.setdefault('swift.infocache', {})
req = _prepare_pre_auth_info_request(
env, "/%s/%s" % (version, account),
(swift_source or 'GET_ACCOUNT_INFO'))
resp = req.get_response(app)
# Check in infocache to see if the proxy (or anyone else) already
# populated the cache for us. If they did, just use what's there.
#
# The point of this is to avoid setting the value in memcached
# twice. Otherwise, we're needlessly sending requests across the
# network.
#
# If the info didn't make it into the cache, we'll compute it from
# the response and populate the cache ourselves.
#
# Note that this is taking "exists in infocache" to imply "exists in
# memcache". That's because we're trying to avoid superfluous
# network traffic, and checking in memcache prior to setting in
# memcache would defeat the purpose.
info = _get_info_from_infocache(env, account)
if info is None:
info = set_info_cache(app, env, account, None, resp)
if info:
info = info.copy() # avoid mutating what's in swift.infocache
else:
info = headers_to_account_info({}, 0)
for field in ('container_count', 'bytes', 'total_object_count'):
if info.get(field) is None:
info[field] = 0
else:
info[field] = int(info[field])
return info
def get_cache_key(account, container=None, obj=None):
"""
Get the keys for both memcache and env['swift.infocache'] (cache_key)
where info about accounts, containers, and objects is cached
:param account: The name of the account
:param container: The name of the container (or None if account)
:param obj: The name of the object (or None if account or container)
:returns: a string cache_key
"""
if obj:
if not (account and container):
raise ValueError('Object cache key requires account and container')
cache_key = 'object/%s/%s/%s' % (account, container, obj)
elif container:
if not account:
raise ValueError('Container cache key requires account')
cache_key = 'container/%s/%s' % (account, container)
else:
cache_key = 'account/%s' % account
    # Use a unique environment cache key per account and per container.
    # This allows caching both account and container info and ensures that
    # when we copy this env to form a new request, it won't accidentally
    # reuse the old container or account info.
return cache_key
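# Example keys (illustrative only; the account, container, and object names
# are made up): the cache key encodes the entity type and the full path, so
#
#     get_cache_key('AUTH_test')                      -> 'account/AUTH_test'
#     get_cache_key('AUTH_test', 'photos')            -> 'container/AUTH_test/photos'
#     get_cache_key('AUTH_test', 'photos', 'cat.jpg') -> 'object/AUTH_test/photos/cat.jpg'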
def set_info_cache(app, env, account, container, resp):
"""
Cache info in both memcache and env.
:param app: the application object
:param account: the unquoted account name
:param container: the unquoted container name or None
:param resp: the response received or None if info cache should be cleared
:returns: the info that was placed into the cache, or None if the
request status was not in (404, 410, 2xx).
"""
infocache = env.setdefault('swift.infocache', {})
cache_time = None
if container and resp:
cache_time = int(resp.headers.get(
'X-Backend-Recheck-Container-Existence',
DEFAULT_RECHECK_CONTAINER_EXISTENCE))
elif resp:
cache_time = int(resp.headers.get(
'X-Backend-Recheck-Account-Existence',
DEFAULT_RECHECK_ACCOUNT_EXISTENCE))
cache_key = get_cache_key(account, container)
if resp:
if resp.status_int in (HTTP_NOT_FOUND, HTTP_GONE):
cache_time *= 0.1
elif not is_success(resp.status_int):
cache_time = None
# Next actually set both memcache and the env cache
memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
if cache_time is None:
infocache.pop(cache_key, None)
if memcache:
memcache.delete(cache_key)
return
if container:
info = headers_to_container_info(resp.headers, resp.status_int)
else:
info = headers_to_account_info(resp.headers, resp.status_int)
if memcache:
memcache.set(cache_key, info, time=cache_time)
infocache[cache_key] = info
return info
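# Worked example (illustrative, assuming the upstream default recheck
# interval of 60 seconds): a 2xx container HEAD is cached for the full 60
# seconds, a 404 or 410 is cached for one tenth of that (6 seconds), and
# any other error status clears the cached entry instead of refreshing it.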
def set_object_info_cache(app, env, account, container, obj, resp):
"""
Cache object info in the WSGI environment, but not in memcache. Caching
in memcache would lead to cache pressure and mass evictions due to the
large number of objects in a typical Swift cluster. This is a
per-request cache only.
:param app: the application object
:param account: the unquoted account name
:param container: the unquoted container name
    :param obj: the unquoted object name
:param resp: a GET or HEAD response received from an object server, or
None if info cache should be cleared
:returns: the object info
"""
cache_key = get_cache_key(account, container, obj)
if 'swift.infocache' in env and not resp:
env['swift.infocache'].pop(cache_key, None)
return
info = headers_to_object_info(resp.headers, resp.status_int)
env.setdefault('swift.infocache', {})[cache_key] = info
return info
def clear_info_cache(app, env, account, container=None):
"""
Clear the cached info in both memcache and env
:param app: the application object
:param env: the WSGI environment
:param account: the account name
    :param container: the container name, or None to clear the account info
"""
set_info_cache(app, env, account, container, None)
def _get_info_from_infocache(env, account, container=None):
"""
Get cached account or container information from request-environment
cache (swift.infocache).
:param env: the environment used by the current request
:param account: the account name
:param container: the container name
:returns: a dictionary of cached info on cache hit, None on miss
"""
cache_key = get_cache_key(account, container)
if 'swift.infocache' in env and cache_key in env['swift.infocache']:
return env['swift.infocache'][cache_key]
return None
def _get_info_from_memcache(app, env, account, container=None):
"""
Get cached account or container information from memcache
:param app: the application object
:param env: the environment used by the current request
:param account: the account name
:param container: the container name
:returns: a dictionary of cached info on cache hit, None on miss. Also
returns None if memcache is not in use.
"""
cache_key = get_cache_key(account, container)
memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
if memcache:
info = memcache.get(cache_key)
if info:
for key in info:
if isinstance(info[key], six.text_type):
info[key] = info[key].encode("utf-8")
elif isinstance(info[key], dict):
for subkey, value in info[key].items():
if isinstance(value, six.text_type):
info[key][subkey] = value.encode("utf-8")
env.setdefault('swift.infocache', {})[cache_key] = info
return info
return None
def _get_info_from_caches(app, env, account, container=None):
"""
Get the cached info from env or memcache (if used) in that order.
Used for both account and container info.
:param app: the application object
:param env: the environment used by the current request
:returns: the cached info or None if not cached
"""
info = _get_info_from_infocache(env, account, container)
if info is None:
info = _get_info_from_memcache(app, env, account, container)
return info
def _prepare_pre_auth_info_request(env, path, swift_source):
"""
Prepares a pre authed request to obtain info using a HEAD.
:param env: the environment used by the current request
:param path: The unquoted request path
:param swift_source: value for swift.source in WSGI environment
:returns: the pre authed request
"""
# Set the env for the pre_authed call without a query string
newenv = make_pre_authed_env(env, 'HEAD', path, agent='Swift',
query_string='', swift_source=swift_source)
    # This is a sub-request for container metadata; drop the Origin header
    # from the request so it is not treated as a CORS request.
newenv.pop('HTTP_ORIGIN', None)
# ACLs are only shown to account owners, so let's make sure this request
# looks like it came from the account owner.
newenv['swift_owner'] = True
# Note that Request.blank expects quoted path
return Request.blank(quote(path), environ=newenv)
def get_info(app, env, account, container=None, swift_source=None):
"""
Get info about accounts or containers
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted name of the account
:param container: The unquoted name of the container (or None if account)
:param swift_source: swift source logged for any subrequests made while
retrieving the account or container info
:returns: information about the specified entity in a dictionary. See
get_account_info and get_container_info for details on what's in the
dictionary.
"""
env.setdefault('swift.infocache', {})
if container:
path = '/v1/%s/%s' % (account, container)
path_env = env.copy()
path_env['PATH_INFO'] = path
return get_container_info(path_env, app, swift_source=swift_source)
else:
# account info
path = '/v1/%s' % (account,)
path_env = env.copy()
path_env['PATH_INFO'] = path
return get_account_info(path_env, app, swift_source=swift_source)
def _get_object_info(app, env, account, container, obj, swift_source=None):
"""
    Get the info about an object
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted name of the account
:param container: The unquoted name of the container
:param obj: The unquoted name of the object
    :returns: the cached info or None if it cannot be retrieved
"""
cache_key = get_cache_key(account, container, obj)
info = env.get('swift.infocache', {}).get(cache_key)
if info:
return info
# Not in cache, let's try the object servers
path = '/v1/%s/%s/%s' % (account, container, obj)
req = _prepare_pre_auth_info_request(env, path, swift_source)
resp = req.get_response(app)
# Unlike get_account_info() and get_container_info(), we don't save
# things in memcache, so we can store the info without network traffic,
# *and* the proxy doesn't cache object info for us, so there's no chance
# that the object info would be in the environment. Thus, we just
# compute the object info based on the response and stash it in
# swift.infocache.
info = set_object_info_cache(app, env, account, container, obj, resp)
return info
def close_swift_conn(src):
"""
Force close the http connection to the backend.
:param src: the response from the backend
"""
try:
# Since the backends set "Connection: close" in their response
# headers, the response object (src) is solely responsible for the
# socket. The connection object (src.swift_conn) has no references
# to the socket, so calling its close() method does nothing, and
# therefore we don't do it.
#
# Also, since calling the response's close() method might not
# close the underlying socket but only decrement some
# reference-counter, we have a special method here that really,
# really kills the underlying socket with a close() syscall.
src.nuke_from_orbit() # it's the only way to be sure
except Exception:
pass
def bytes_to_skip(record_size, range_start):
"""
Assume an object is composed of N records, where the first N-1 are all
the same size and the last is at most that large, but may be smaller.
When a range request is made, it might start with a partial record. This
must be discarded, lest the consumer get bad data. This is particularly
true of suffix-byte-range requests, e.g. "Range: bytes=-12345" where the
size of the object is unknown at the time the request is made.
This function computes the number of bytes that must be discarded to
ensure only whole records are yielded. Erasure-code decoding needs this.
This function could have been inlined, but it took enough tries to get
right that some targeted unit tests were desirable, hence its extraction.
"""
return (record_size - (range_start % record_size)) % record_size
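# Worked example (illustrative numbers): with 1024-byte records and a range
# starting at byte 1500, the request begins 476 bytes into the second
# record, so (1024 - (1500 % 1024)) % 1024 == 548 bytes must be discarded
# before whole records can be yielded.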
class ResumingGetter(object):
def __init__(self, app, req, server_type, node_iter, partition, path,
backend_headers, concurrency=1, client_chunk_size=None,
newest=None, header_provider=None):
self.app = app
self.node_iter = node_iter
self.server_type = server_type
self.partition = partition
self.path = path
self.backend_headers = backend_headers
self.client_chunk_size = client_chunk_size
self.skip_bytes = 0
self.bytes_used_from_backend = 0
self.used_nodes = []
self.used_source_etag = ''
self.concurrency = concurrency
self.node = None
self.header_provider = header_provider
# stuff from request
self.req_method = req.method
self.req_path = req.path
self.req_query_string = req.query_string
if newest is None:
self.newest = config_true_value(req.headers.get('x-newest', 'f'))
else:
self.newest = newest
# populated when finding source
self.statuses = []
self.reasons = []
self.bodies = []
self.source_headers = []
self.sources = []
# populated from response headers
self.start_byte = self.end_byte = self.length = None
def fast_forward(self, num_bytes):
"""
Will skip num_bytes into the current ranges.
        :param num_bytes: the number of bytes that have already been read on
this request. This will change the Range header
so that the next req will start where it left off.
:raises ValueError: if invalid range header
:raises HTTPRequestedRangeNotSatisfiable: if begin + num_bytes
> end of range + 1
:raises RangeAlreadyComplete: if begin + num_bytes == end of range + 1
"""
if 'Range' in self.backend_headers:
req_range = Range(self.backend_headers['Range'])
begin, end = req_range.ranges[0]
if begin is None:
# this is a -50 range req (last 50 bytes of file)
end -= num_bytes
if end == 0:
# we sent out exactly the first range's worth of bytes, so
# we're done with it
raise RangeAlreadyComplete()
else:
begin += num_bytes
if end is not None and begin == end + 1:
# we sent out exactly the first range's worth of bytes, so
# we're done with it
raise RangeAlreadyComplete()
if end is not None and (begin > end or end < 0):
raise HTTPRequestedRangeNotSatisfiable()
req_range.ranges = [(begin, end)] + req_range.ranges[1:]
self.backend_headers['Range'] = str(req_range)
else:
self.backend_headers['Range'] = 'bytes=%d-' % num_bytes
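# Worked example (illustrative values): if the backend Range header is
# 'bytes=100-199' and 50 bytes have already been relayed to the client,
# fast_forward(50) rewrites it to 'bytes=150-199'; for a suffix range such
# as 'bytes=-100', the same call shrinks it to 'bytes=-50'.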
def pop_range(self):
"""
Remove the first byterange from our Range header.
This is used after a byterange has been completely sent to the
client; this way, should we need to resume the download from another
object server, we do not re-fetch byteranges that the client already
has.
If we have no Range header, this is a no-op.
"""
if 'Range' in self.backend_headers:
try:
req_range = Range(self.backend_headers['Range'])
except ValueError:
# there's a Range header, but it's garbage, so get rid of it
self.backend_headers.pop('Range')
return
begin, end = req_range.ranges.pop(0)
if len(req_range.ranges) > 0:
self.backend_headers['Range'] = str(req_range)
else:
self.backend_headers.pop('Range')
def learn_size_from_content_range(self, start, end, length):
"""
If client_chunk_size is set, makes sure we yield things starting on
chunk boundaries based on the Content-Range header in the response.
Sets our Range header's first byterange to the value learned from
the Content-Range header in the response; if we were given a
fully-specified range (e.g. "bytes=123-456"), this is a no-op.
If we were given a half-specified range (e.g. "bytes=123-" or
"bytes=-456"), then this changes the Range header to a
semantically-equivalent one *and* it lets us resume on a proper
boundary instead of just in the middle of a piece somewhere.
"""
if length == 0:
return
if self.client_chunk_size:
self.skip_bytes = bytes_to_skip(self.client_chunk_size, start)
if 'Range' in self.backend_headers:
try:
req_range = Range(self.backend_headers['Range'])
new_ranges = [(start, end)] + req_range.ranges[1:]
except ValueError:
new_ranges = [(start, end)]
else:
new_ranges = [(start, end)]
self.backend_headers['Range'] = (
"bytes=" + (",".join("%s-%s" % (s if s is not None else '',
e if e is not None else '')
for s, e in new_ranges)))
def is_good_source(self, src):
"""
Indicates whether or not the request made to the backend found
what it was looking for.
:param src: the response from the backend
:returns: True if found, False if not
"""
if self.server_type == 'Object' and src.status == 416:
return True
return is_success(src.status) or is_redirection(src.status)
def response_parts_iter(self, req):
source, node = self._get_source_and_node()
it = None
if source:
it = self._get_response_parts_iter(req, node, source)
return it
def _get_response_parts_iter(self, req, node, source):
# Someday we can replace this [mess] with python 3's "nonlocal"
source = [source]
node = [node]
try:
client_chunk_size = self.client_chunk_size
node_timeout = self.app.node_timeout
if self.server_type == 'Object':
node_timeout = self.app.recoverable_node_timeout
# This is safe; it sets up a generator but does not call next()
# on it, so no IO is performed.
parts_iter = [
http_response_to_document_iters(
source[0], read_chunk_size=self.app.object_chunk_size)]
def get_next_doc_part():
while True:
try:
# This call to next() performs IO when we have a
# multipart/byteranges response; it reads the MIME
# boundary and part headers.
#
# If we don't have a multipart/byteranges response,
# but just a 200 or a single-range 206, then this
# performs no IO, and either just returns source or
# raises StopIteration.
with ChunkReadTimeout(node_timeout):
# if StopIteration is raised, it escapes and is
# handled elsewhere
start_byte, end_byte, length, headers, part = next(
parts_iter[0])
return (start_byte, end_byte, length, headers, part)
except ChunkReadTimeout:
new_source, new_node = self._get_source_and_node()
if new_source:
self.app.exception_occurred(
node[0], _('Object'),
_('Trying to read during GET (retrying)'))
# Close-out the connection as best as possible.
if getattr(source[0], 'swift_conn', None):
close_swift_conn(source[0])
source[0] = new_source
node[0] = new_node
# This is safe; it sets up a generator but does
# not call next() on it, so no IO is performed.
parts_iter[0] = http_response_to_document_iters(
new_source,
read_chunk_size=self.app.object_chunk_size)
else:
raise StopIteration()
def iter_bytes_from_response_part(part_file):
nchunks = 0
buf = ''
while True:
try:
with ChunkReadTimeout(node_timeout):
chunk = part_file.read(self.app.object_chunk_size)
nchunks += 1
buf += chunk
except ChunkReadTimeout:
exc_type, exc_value, exc_traceback = exc_info()
if self.newest or self.server_type != 'Object':
six.reraise(exc_type, exc_value, exc_traceback)
try:
self.fast_forward(self.bytes_used_from_backend)
except (HTTPException, ValueError):
six.reraise(exc_type, exc_value, exc_traceback)
except RangeAlreadyComplete:
break
buf = ''
new_source, new_node = self._get_source_and_node()
if new_source:
self.app.exception_occurred(
node[0], _('Object'),
_('Trying to read during GET (retrying)'))
# Close-out the connection as best as possible.
if getattr(source[0], 'swift_conn', None):
close_swift_conn(source[0])
source[0] = new_source
node[0] = new_node
# This is safe; it just sets up a generator but
# does not call next() on it, so no IO is
# performed.
parts_iter[0] = http_response_to_document_iters(
new_source,
read_chunk_size=self.app.object_chunk_size)
try:
_junk, _junk, _junk, _junk, part_file = \
get_next_doc_part()
except StopIteration:
# Tried to find a new node from which to
# finish the GET, but failed. There's
# nothing more to do here.
return
else:
six.reraise(exc_type, exc_value, exc_traceback)
else:
if buf and self.skip_bytes:
if self.skip_bytes < len(buf):
buf = buf[self.skip_bytes:]
self.bytes_used_from_backend += self.skip_bytes
self.skip_bytes = 0
else:
self.skip_bytes -= len(buf)
self.bytes_used_from_backend += len(buf)
buf = ''
if not chunk:
if buf:
with ChunkWriteTimeout(
self.app.client_timeout):
self.bytes_used_from_backend += len(buf)
yield buf
buf = ''
break
if client_chunk_size is not None:
while len(buf) >= client_chunk_size:
client_chunk = buf[:client_chunk_size]
buf = buf[client_chunk_size:]
with ChunkWriteTimeout(
self.app.client_timeout):
self.bytes_used_from_backend += \
len(client_chunk)
yield client_chunk
else:
with ChunkWriteTimeout(self.app.client_timeout):
self.bytes_used_from_backend += len(buf)
yield buf
buf = ''
# This is for fairness; if the network is outpacing
# the CPU, we'll always be able to read and write
# data without encountering an EWOULDBLOCK, and so
# eventlet will not switch greenthreads on its own.
# We do it manually so that clients don't starve.
#
# The number 5 here was chosen by making stuff up.
# It's not every single chunk, but it's not too big
# either, so it seemed like it would probably be an
# okay choice.
#
# Note that we may trampoline to other greenthreads
# more often than once every 5 chunks, depending on
# how blocking our network IO is; the explicit sleep
# here simply provides a lower bound on the rate of
# trampolining.
if nchunks % 5 == 0:
sleep()
part_iter = None
try:
while True:
start_byte, end_byte, length, headers, part = \
get_next_doc_part()
self.learn_size_from_content_range(
start_byte, end_byte, length)
self.bytes_used_from_backend = 0
part_iter = iter_bytes_from_response_part(part)
yield {'start_byte': start_byte, 'end_byte': end_byte,
'entity_length': length, 'headers': headers,
'part_iter': part_iter}
self.pop_range()
except StopIteration:
req.environ['swift.non_client_disconnect'] = True
finally:
if part_iter:
part_iter.close()
except ChunkReadTimeout:
self.app.exception_occurred(node[0], _('Object'),
_('Trying to read during GET'))
raise
except ChunkWriteTimeout:
self.app.logger.warning(
_('Client did not read from proxy within %ss') %
self.app.client_timeout)
self.app.logger.increment('client_timeouts')
except GeneratorExit:
exc_type, exc_value, exc_traceback = exc_info()
warn = True
try:
req_range = Range(self.backend_headers['Range'])
except ValueError:
req_range = None
if req_range and len(req_range.ranges) == 1:
begin, end = req_range.ranges[0]
if end is not None and begin is not None:
if end - begin + 1 == self.bytes_used_from_backend:
warn = False
if not req.environ.get('swift.non_client_disconnect') and warn:
self.app.logger.warning(_('Client disconnected on read'))
six.reraise(exc_type, exc_value, exc_traceback)
except Exception:
self.app.logger.exception(_('Trying to send to client'))
raise
finally:
# Close-out the connection as best as possible.
if getattr(source[0], 'swift_conn', None):
close_swift_conn(source[0])
@property
def last_status(self):
if self.statuses:
return self.statuses[-1]
else:
return None
@property
def last_headers(self):
if self.source_headers:
return HeaderKeyDict(self.source_headers[-1])
else:
return None
def _make_node_request(self, node, node_timeout, logger_thread_locals):
self.app.logger.thread_locals = logger_thread_locals
if node in self.used_nodes:
return False
req_headers = dict(self.backend_headers)
# a request may be specialised with specific backend headers
if self.header_provider:
req_headers.update(self.header_provider())
start_node_timing = time.time()
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
node['ip'], node['port'], node['device'],
self.partition, self.req_method, self.path,
headers=req_headers,
query_string=self.req_query_string)
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(node_timeout):
possible_source = conn.getresponse()
# See NOTE: swift_conn at top of file about this.
possible_source.swift_conn = conn
except (Exception, Timeout):
self.app.exception_occurred(
node, self.server_type,
_('Trying to %(method)s %(path)s') %
{'method': self.req_method, 'path': self.req_path})
return False
if self.is_good_source(possible_source):
# 404 if we know we don't have a synced copy
if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
self.statuses.append(HTTP_NOT_FOUND)
self.reasons.append('')
self.bodies.append('')
self.source_headers.append([])
close_swift_conn(possible_source)
else:
if self.used_source_etag:
src_headers = dict(
(k.lower(), v) for k, v in
possible_source.getheaders())
if self.used_source_etag != src_headers.get(
'x-object-sysmeta-ec-etag',
src_headers.get('etag', '')).strip('"'):
self.statuses.append(HTTP_NOT_FOUND)
self.reasons.append('')
self.bodies.append('')
self.source_headers.append([])
return False
self.statuses.append(possible_source.status)
self.reasons.append(possible_source.reason)
self.bodies.append(None)
self.source_headers.append(possible_source.getheaders())
self.sources.append((possible_source, node))
if not self.newest: # one good source is enough
return True
else:
self.statuses.append(possible_source.status)
self.reasons.append(possible_source.reason)
self.bodies.append(possible_source.read())
self.source_headers.append(possible_source.getheaders())
if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
self.app.error_limit(node, _('ERROR Insufficient Storage'))
elif is_server_error(possible_source.status):
self.app.error_occurred(
node, _('ERROR %(status)d %(body)s '
'From %(type)s Server') %
{'status': possible_source.status,
'body': self.bodies[-1][:1024],
'type': self.server_type})
return False
def _get_source_and_node(self):
self.statuses = []
self.reasons = []
self.bodies = []
self.source_headers = []
self.sources = []
nodes = GreenthreadSafeIterator(self.node_iter)
node_timeout = self.app.node_timeout
if self.server_type == 'Object' and not self.newest:
node_timeout = self.app.recoverable_node_timeout
pile = GreenAsyncPile(self.concurrency)
for node in nodes:
pile.spawn(self._make_node_request, node, node_timeout,
self.app.logger.thread_locals)
_timeout = self.app.concurrency_timeout \
if pile.inflight < self.concurrency else None
if pile.waitfirst(_timeout):
break
else:
# ran out of nodes, see if any stragglers will finish
any(pile)
if self.sources:
self.sources.sort(key=lambda s: source_key(s[0]))
source, node = self.sources.pop()
for src, _junk in self.sources:
close_swift_conn(src)
self.used_nodes.append(node)
src_headers = dict(
(k.lower(), v) for k, v in
source.getheaders())
# Save off the source etag so that, if we lose the connection
# and have to resume from a different node, we can be sure that
# we have the same object (replication) or a fragment archive
# from the same object (EC). Otherwise, if the cluster has two
# versions of the same object, we might end up switching between
# old and new mid-stream and giving garbage to the client.
self.used_source_etag = src_headers.get(
'x-object-sysmeta-ec-etag',
src_headers.get('etag', '')).strip('"')
self.node = node
return source, node
return None, None
class GetOrHeadHandler(ResumingGetter):
def _make_app_iter(self, req, node, source):
"""
Returns an iterator over the contents of the source (via its read
func). There is also quite a bit of cleanup to ensure garbage
collection works and the underlying socket of the source is closed.
:param req: incoming request object
:param source: The httplib.Response object this iterator should read
from.
:param node: The node the source is reading from, for logging purposes.
"""
ct = source.getheader('Content-Type')
if ct:
content_type, content_type_attrs = parse_content_type(ct)
is_multipart = content_type == 'multipart/byteranges'
else:
is_multipart = False
boundary = "dontcare"
if is_multipart:
# we need some MIME boundary; fortunately, the object server has
# furnished one for us, so we'll just re-use it
boundary = dict(content_type_attrs)["boundary"]
parts_iter = self._get_response_parts_iter(req, node, source)
def add_content_type(response_part):
response_part["content_type"] = \
HeaderKeyDict(response_part["headers"]).get("Content-Type")
return response_part
return document_iters_to_http_response_body(
(add_content_type(pi) for pi in parts_iter),
boundary, is_multipart, self.app.logger)
def get_working_response(self, req):
source, node = self._get_source_and_node()
res = None
if source:
res = Response(request=req)
res.status = source.status
update_headers(res, source.getheaders())
if req.method == 'GET' and \
source.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
res.app_iter = self._make_app_iter(req, node, source)
# See NOTE: swift_conn at top of file about this.
res.swift_conn = source.swift_conn
if not res.environ:
res.environ = {}
res.environ['swift_x_timestamp'] = \
source.getheader('x-timestamp')
res.accept_ranges = 'bytes'
res.content_length = source.getheader('Content-Length')
if source.getheader('Content-Type'):
res.charset = None
res.content_type = source.getheader('Content-Type')
return res
class NodeIter(object):
"""
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of nodes. If a
node yielded subsequently gets error limited, an extra node will be
yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
:param app: a proxy app
    :param ring: ring to yield nodes from
:param partition: ring partition to yield nodes for
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
"""
def __init__(self, app, ring, partition, node_iter=None):
self.app = app
self.ring = ring
self.partition = partition
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
node_iter = itertools.chain(
part_nodes, ring.get_more_nodes(partition))
num_primary_nodes = len(part_nodes)
self.nodes_left = self.app.request_node_count(num_primary_nodes)
self.expected_handoffs = self.nodes_left - num_primary_nodes
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
self.primary_nodes = self.app.sort_nodes(
list(itertools.islice(node_iter, num_primary_nodes)))
self.handoff_iter = node_iter
self._node_provider = None
def __iter__(self):
self._node_iter = self._node_gen()
return self
def log_handoffs(self, handoffs):
"""
Log handoff requests if handoff logging is enabled and the
handoff was not expected.
        We only log handoffs when we've pushed the handoff count further
        than we would normally expect, that is, beyond
        (request_node_count - num_primaries); when the handoff count goes
        higher than that, one of the primaries must have been skipped
        because of error limiting before we consumed all of our nodes_left.
"""
if not self.app.log_handoffs:
return
extra_handoffs = handoffs - self.expected_handoffs
if extra_handoffs > 0:
self.app.logger.increment('handoff_count')
self.app.logger.warning(
'Handoff requested (%d)' % handoffs)
if (extra_handoffs == len(self.primary_nodes)):
# all the primaries were skipped, and handoffs didn't help
self.app.logger.increment('handoff_all_count')
def set_node_provider(self, callback):
"""
Install a callback function that will be used during a call to next()
to get an alternate node instead of returning the next node from the
iterator.
:param callback: A no argument function that should return a node dict
or None.
"""
self._node_provider = callback
def _node_gen(self):
for node in self.primary_nodes:
if not self.app.error_limited(node):
yield node
if not self.app.error_limited(node):
self.nodes_left -= 1
if self.nodes_left <= 0:
return
handoffs = 0
for node in self.handoff_iter:
if not self.app.error_limited(node):
handoffs += 1
self.log_handoffs(handoffs)
yield node
if not self.app.error_limited(node):
self.nodes_left -= 1
if self.nodes_left <= 0:
return
def next(self):
if self._node_provider:
# give node provider the opportunity to inject a node
node = self._node_provider()
if node:
return node
return next(self._node_iter)
def __next__(self):
return self.next()
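# Usage sketch (illustrative only; variable names are assumed): the proxy
# normally builds this iterator via app.iter_nodes(), but direct
# construction plus the GreenthreadSafeIterator wrapping recommended in the
# docstring looks roughly like:
#
#     node_iter = NodeIter(app, app.object_ring, partition)
#     for node in GreenthreadSafeIterator(node_iter):
#         ...  # issue a backend request to node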
class Controller(object):
"""Base WSGI controller class for the proxy"""
server_type = 'Base'
# Ensure these are all lowercase
pass_through_headers = []
def __init__(self, app):
"""
Creates a controller attached to an application instance
:param app: the application instance
"""
self.account_name = None
self.app = app
self.trans_id = '-'
self._allowed_methods = None
@property
def allowed_methods(self):
if self._allowed_methods is None:
self._allowed_methods = set()
all_methods = inspect.getmembers(self, predicate=inspect.ismethod)
for name, m in all_methods:
if getattr(m, 'publicly_accessible', False):
self._allowed_methods.add(name)
return self._allowed_methods
def _x_remove_headers(self):
"""
Returns a list of headers that must not be sent to the backend
        :returns: a list of headers
"""
return []
def transfer_headers(self, src_headers, dst_headers):
"""
Transfer legal headers from an original client request to dictionary
that will be used as headers by the backend request
:param src_headers: A dictionary of the original client request headers
:param dst_headers: A dictionary of the backend request headers
"""
st = self.server_type.lower()
x_remove = 'x-remove-%s-meta-' % st
dst_headers.update((k.lower().replace('-remove', '', 1), '')
for k in src_headers
if k.lower().startswith(x_remove) or
k.lower() in self._x_remove_headers())
dst_headers.update((k.lower(), v)
for k, v in src_headers.items()
if k.lower() in self.pass_through_headers or
is_sys_or_user_meta(st, k))
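# Example (illustrative, for a container controller): a client header
# 'X-Remove-Container-Meta-Color: x' becomes 'x-container-meta-color' with
# an empty value on the backend request, which deletes that metadata item,
# while 'X-Container-Meta-Color: blue' is passed through unchanged.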
def generate_request_headers(self, orig_req=None, additional=None,
transfer=False):
"""
Create a list of headers to be used in backend requests
:param orig_req: the original request sent by the client to the proxy
:param additional: additional headers to send to the backend
:param transfer: If True, transfer headers from original client request
:returns: a dictionary of headers
"""
# Use the additional headers first so they don't overwrite the headers
# we require.
headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
if transfer:
self.transfer_headers(orig_req.headers, headers)
headers.setdefault('x-timestamp', Timestamp(time.time()).internal)
if orig_req:
referer = orig_req.as_referer()
else:
referer = ''
headers['x-trans-id'] = self.trans_id
headers['connection'] = 'close'
headers['user-agent'] = 'proxy-server %s' % os.getpid()
headers['referer'] = referer
return headers
def account_info(self, account, req=None):
"""
Get account information, and also verify that the account exists.
:param account: name of the account to get the info for
:param req: caller's HTTP request context object (optional)
:returns: tuple of (account partition, account nodes, container_count)
or (None, None, None) if it does not exist
"""
partition, nodes = self.app.account_ring.get_nodes(account)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
env.setdefault('swift.infocache', {})
path_env = env.copy()
path_env['PATH_INFO'] = "/v1/%s" % (account,)
info = get_account_info(path_env, self.app)
if (not info
or not is_success(info['status'])
or not info.get('account_really_exists', True)):
return None, None, None
container_count = info['container_count']
return partition, nodes, container_count
def container_info(self, account, container, req=None):
"""
        Get container information and thereby verify container existence.
This will also verify account existence.
:param account: account name for the container
:param container: container name to look up
:param req: caller's HTTP request context object (optional)
:returns: dict containing at least container partition ('partition'),
                  container nodes ('nodes'), container read
acl ('read_acl'), container write acl ('write_acl'),
and container sync key ('sync_key').
Values are set to None if the container does not exist.
"""
part, nodes = self.app.container_ring.get_nodes(account, container)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
env.setdefault('swift.infocache', {})
path_env = env.copy()
path_env['PATH_INFO'] = "/v1/%s/%s" % (account, container)
info = get_container_info(path_env, self.app)
if not info or not is_success(info.get('status')):
info = headers_to_container_info({}, 0)
info['partition'] = None
info['nodes'] = None
else:
info['partition'] = part
info['nodes'] = nodes
return info
def _make_request(self, nodes, part, method, path, headers, query,
logger_thread_locals):
"""
Iterates over the given node iterator, sending an HTTP request to one
node at a time. The first non-informational, non-server-error
response is returned. If no non-informational, non-server-error
response is received from any of the nodes, returns None.
:param nodes: an iterator of the backend server and handoff servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
(full path ends up being /<$device>/<$part>/<$path>)
:param headers: dictionary of headers
:param query: query string to send to the backend.
:param logger_thread_locals: The thread local values to be set on the
self.app.logger to retain transaction
logging information.
:returns: a swob.Response object, or None if no responses were received
"""
self.app.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
start_node_timing = time.time()
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], part, method, path,
headers=headers, query_string=query)
conn.node = node
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(self.app.node_timeout):
resp = conn.getresponse()
if not is_informational(resp.status) and \
not is_server_error(resp.status):
return resp.status, resp.reason, resp.getheaders(), \
resp.read()
elif resp.status == HTTP_INSUFFICIENT_STORAGE:
self.app.error_limit(node,
_('ERROR Insufficient Storage'))
elif is_server_error(resp.status):
self.app.error_occurred(
node, _('ERROR %(status)d '
'Trying to %(method)s %(path)s'
' From %(type)s Server') % {
'status': resp.status,
'method': method,
'path': path,
'type': self.server_type})
except (Exception, Timeout):
self.app.exception_occurred(
node, self.server_type,
_('Trying to %(method)s %(path)s') %
{'method': method, 'path': path})
def make_requests(self, req, ring, part, method, path, headers,
query_string='', overrides=None):
"""
Sends an HTTP request to multiple nodes and aggregates the results.
It attempts the primary nodes concurrently, then iterates over the
handoff nodes as needed.
:param req: a request sent by the client
:param ring: the ring used for finding backend servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
(full path ends up being /<$device>/<$part>/<$path>)
:param headers: a list of dicts, where each dict represents one
backend request that should be made.
:param query_string: optional query string to send to the backend
:param overrides: optional return status override map used to override
the returned status of a request.
:returns: a swob.Response object
"""
start_nodes = ring.get_part_nodes(part)
nodes = GreenthreadSafeIterator(self.app.iter_nodes(ring, part))
pile = GreenAsyncPile(len(start_nodes))
for head in headers:
pile.spawn(self._make_request, nodes, part, method, path,
head, query_string, self.app.logger.thread_locals)
response = []
statuses = []
for resp in pile:
if not resp:
continue
response.append(resp)
statuses.append(resp[0])
if self.have_quorum(statuses, len(start_nodes)):
break
# give any pending requests *some* chance to finish
finished_quickly = pile.waitall(self.app.post_quorum_timeout)
for resp in finished_quickly:
if not resp:
continue
response.append(resp)
statuses.append(resp[0])
while len(response) < len(start_nodes):
response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
statuses, reasons, resp_headers, bodies = zip(*response)
return self.best_response(req, statuses, reasons, bodies,
'%s %s' % (self.server_type, req.method),
overrides=overrides, headers=resp_headers)
def _quorum_size(self, n):
"""
Number of successful backend responses needed for the proxy to
consider the client request successful.
"""
return quorum_size(n)
def have_quorum(self, statuses, node_count, quorum=None):
"""
Given a list of statuses from several requests, determine if
a quorum response can already be decided.
:param statuses: list of statuses returned
:param node_count: number of nodes being queried (basically ring count)
:param quorum: number of statuses required for quorum
:returns: True or False, depending on if quorum is established
"""
if quorum is None:
quorum = self._quorum_size(node_count)
if len(statuses) >= quorum:
for hundred in (HTTP_CONTINUE, HTTP_OK, HTTP_MULTIPLE_CHOICES,
HTTP_BAD_REQUEST):
if sum(1 for s in statuses
if hundred <= s < hundred + 100) >= quorum:
return True
return False
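# Worked example (illustrative): with three replicas the default quorum
# size is two, so statuses [201, 201] already establish quorum and the
# remaining response can be ignored, while [201, 503] does not and further
# responses must be collected.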
def best_response(self, req, statuses, reasons, bodies, server_type,
etag=None, headers=None, overrides=None,
quorum_size=None):
"""
Given a list of responses from several servers, choose the best to
return to the API.
:param req: swob.Request object
:param statuses: list of statuses returned
:param reasons: list of reasons for each status
:param bodies: bodies of each response
:param server_type: type of server the responses came from
:param etag: etag
:param headers: headers of each response
:param overrides: overrides to apply when lacking quorum
:param quorum_size: quorum size to use
:returns: swob.Response object with the correct status, body, etc. set
"""
if quorum_size is None:
quorum_size = self._quorum_size(len(statuses))
resp = self._compute_quorum_response(
req, statuses, reasons, bodies, etag, headers,
quorum_size=quorum_size)
if overrides and not resp:
faked_up_status_indices = set()
transformed = []
for (i, (status, reason, hdrs, body)) in enumerate(zip(
statuses, reasons, headers, bodies)):
if status in overrides:
faked_up_status_indices.add(i)
transformed.append((overrides[status], '', '', ''))
else:
transformed.append((status, reason, hdrs, body))
statuses, reasons, headers, bodies = zip(*transformed)
resp = self._compute_quorum_response(
req, statuses, reasons, bodies, etag, headers,
indices_to_avoid=faked_up_status_indices,
quorum_size=quorum_size)
if not resp:
resp = HTTPServiceUnavailable(request=req)
self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
{'type': server_type, 'statuses': statuses})
return resp
def _compute_quorum_response(self, req, statuses, reasons, bodies, etag,
headers, quorum_size, indices_to_avoid=()):
if not statuses:
return None
for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
hstatuses = \
[(i, s) for i, s in enumerate(statuses)
if hundred <= s < hundred + 100]
if len(hstatuses) >= quorum_size:
try:
status_index, status = max(
((i, stat) for i, stat in hstatuses
if i not in indices_to_avoid),
key=operator.itemgetter(1))
except ValueError:
# All statuses were indices to avoid
continue
resp = status_map[status](request=req)
resp.status = '%s %s' % (status, reasons[status_index])
resp.body = bodies[status_index]
if headers:
update_headers(resp, headers[status_index])
if etag:
resp.headers['etag'] = etag.strip('"')
return resp
return None
@public
def GET(self, req):
"""
Handler for HTTP GET requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
@public
def HEAD(self, req):
"""
Handler for HTTP HEAD requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
def autocreate_account(self, req, account):
"""
Autocreate an account
:param req: request leading to this autocreate
:param account: the unquoted account name
"""
partition, nodes = self.app.account_ring.get_nodes(account)
path = '/%s' % account
headers = {'X-Timestamp': Timestamp(time.time()).internal,
'X-Trans-Id': self.trans_id,
'X-Openstack-Request-Id': self.trans_id,
'Connection': 'close'}
# transfer any x-account-sysmeta headers from original request
# to the autocreate PUT
headers.update((k, v)
for k, v in req.headers.items()
if is_sys_meta('account', k))
resp = self.make_requests(Request.blank('/v1' + path),
self.app.account_ring, partition, 'PUT',
path, [headers] * len(nodes))
if is_success(resp.status_int):
self.app.logger.info(_('autocreate account %r'), path)
clear_info_cache(self.app, req.environ, account)
else:
self.app.logger.warning(_('Could not autocreate account %r'),
path)
def GETorHEAD_base(self, req, server_type, node_iter, partition, path,
concurrency=1, client_chunk_size=None):
"""
Base handler for HTTP GET or HEAD requests.
:param req: swob.Request object
:param server_type: server type used in logging
:param node_iter: an iterator to obtain nodes from
:param partition: partition
:param path: path for the request
:param concurrency: number of requests to run concurrently
:param client_chunk_size: chunk size for response body iterator
:returns: swob.Response object
"""
backend_headers = self.generate_request_headers(
req, additional=req.headers)
handler = GetOrHeadHandler(self.app, req, self.server_type, node_iter,
partition, path, backend_headers,
concurrency,
client_chunk_size=client_chunk_size)
res = handler.get_working_response(req)
if not res:
res = self.best_response(
req, handler.statuses, handler.reasons, handler.bodies,
'%s %s' % (server_type, req.method),
headers=handler.source_headers)
# if a backend policy index is present in resp headers, translate it
# here with the friendly policy name
if 'X-Backend-Storage-Policy-Index' in res.headers and \
is_success(res.status_int):
policy = \
POLICIES.get_by_index(
res.headers['X-Backend-Storage-Policy-Index'])
if policy:
res.headers['X-Storage-Policy'] = policy.name
else:
self.app.logger.error(
'Could not translate %s (%r) from %r to policy',
'X-Backend-Storage-Policy-Index',
res.headers['X-Backend-Storage-Policy-Index'], path)
return res
def is_origin_allowed(self, cors_info, origin):
"""
Is the given Origin allowed to make requests to this resource
:param cors_info: the resource's CORS related metadata headers
:param origin: the origin making the request
:return: True or False
"""
allowed_origins = set()
if cors_info.get('allow_origin'):
allowed_origins.update(
[a.strip()
for a in cors_info['allow_origin'].split(' ')
if a.strip()])
if self.app.cors_allow_origin:
allowed_origins.update(self.app.cors_allow_origin)
return origin in allowed_origins or '*' in allowed_origins
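# Example (illustrative values): with container metadata
# X-Container-Meta-Access-Control-Allow-Origin set to
# 'http://a.example http://b.example', requests from either origin are
# allowed; a '*' entry allows any origin, and origins listed in the proxy's
# cors_allow_origin setting are always allowed.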
@public
def OPTIONS(self, req):
"""
Base handler for OPTIONS requests
:param req: swob.Request object
:returns: swob.Response object
"""
# Prepare the default response
headers = {'Allow': ', '.join(self.allowed_methods)}
resp = Response(status=200, request=req, headers=headers)
# If this isn't a CORS pre-flight request then return now
req_origin_value = req.headers.get('Origin', None)
if not req_origin_value:
return resp
# This is a CORS preflight request so check it's allowed
try:
container_info = \
self.container_info(self.account_name,
self.container_name, req)
except AttributeError:
# This should only happen for requests to the Account. A future
# change could allow CORS requests to the Account level as well.
return resp
cors = container_info.get('cors', {})
# If the CORS origin isn't allowed return a 401
if not self.is_origin_allowed(cors, req_origin_value) or (
req.headers.get('Access-Control-Request-Method') not in
self.allowed_methods):
resp.status = HTTP_UNAUTHORIZED
return resp
# Allow all headers requested in the request. The CORS
# specification does leave the door open for this, as mentioned in
# http://www.w3.org/TR/cors/#resource-preflight-requests
        # Note: since the list of allowed headers can be unbounded, simply
        # echoing the requested headers back can be enough.
allow_headers = set()
if req.headers.get('Access-Control-Request-Headers'):
allow_headers.update(
list_from_csv(req.headers['Access-Control-Request-Headers']))
# Populate the response with the CORS preflight headers
if cors.get('allow_origin') and \
cors.get('allow_origin').strip() == '*':
headers['access-control-allow-origin'] = '*'
else:
headers['access-control-allow-origin'] = req_origin_value
if cors.get('max_age') is not None:
headers['access-control-max-age'] = cors.get('max_age')
headers['access-control-allow-methods'] = \
', '.join(self.allowed_methods)
if allow_headers:
headers['access-control-allow-headers'] = ', '.join(allow_headers)
resp.headers = headers
return resp
# --- end of swift/proxy/controllers/base.py (repo: hurricanerix/swift, license: apache-2.0) ---
from __future__ import print_function
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
counter = 0
while True:
print('backend publishing "com.myapp.topic1"', counter)
self.publish('com.myapp.topic1', counter)
counter += 1
yield sleep(1)
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
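# A matching subscriber (illustrative sketch, not part of this example)
# would register a handler for the same topic from its own onJoin, e.g.:
#
#     @inlineCallbacks
#     def onJoin(self, details):
#         def on_event(counter):
#             print("got event:", counter)
#         yield self.subscribe(on_event, u'com.myapp.topic1')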
# --- end of examples/twisted/wamp/pubsub/basic/backend.py (repo: tomwire/AutobahnPython, license: mit) ---
"""
A connection to a hypervisor through libvirt.
Supports KVM, LXC, QEMU, UML, XEN and Parallels.
"""
import collections
import contextlib
import errno
import functools
import glob
import mmap
import operator
import os
import random
import shutil
import sys
import tempfile
import time
import uuid
import eventlet
from eventlet import greenthread
from eventlet import tpool
from lxml import etree
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
import six
from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import arch
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_mode
from nova.console import serial as serial_console
from nova.console import type as ctype
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova.network import model as network_model
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import loopingcall
from nova.pci import manager as pci_manager
from nova.pci import utils as pci_utils
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt import diagnostics
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import guestfs
from nova.virt import driver
from nova.virt import firewall
from nova.virt import hardware
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import dmcrypt
from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import lvm
from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import vif as libvirt_vif
from nova.virt import netutils
from nova.virt import watchdog_actions
from nova import volume
from nova.volume import encryptors
libvirt = None
LOG = logging.getLogger(__name__)
libvirt_opts = [
cfg.StrOpt('rescue_image_id',
help='Rescue ami image. This will not be used if an image id '
'is provided by the user.'),
cfg.StrOpt('rescue_kernel_id',
help='Rescue aki image'),
cfg.StrOpt('rescue_ramdisk_id',
help='Rescue ari image'),
cfg.StrOpt('virt_type',
default='kvm',
help='Libvirt domain type (valid options are: '
'kvm, lxc, qemu, uml, xen and parallels)'),
cfg.StrOpt('connection_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on virt_type)'),
cfg.BoolOpt('inject_password',
default=False,
help='Inject the admin password at boot time, '
'without an agent.'),
cfg.BoolOpt('inject_key',
default=False,
help='Inject the ssh public key at boot time'),
cfg.IntOpt('inject_partition',
default=-2,
               help='The partition to inject to: '
'-2 => disable, -1 => inspect (libguestfs only), '
'0 => not partitioned, >0 => partition number'),
cfg.BoolOpt('use_usb_tablet',
default=True,
help='Sync virtual and real mouse cursors in Windows VMs'),
cfg.StrOpt('live_migration_uri',
default="qemu+tcp://%s/system",
help='Migration target URI '
'(any included "%s" is replaced with '
'the migration target hostname)'),
cfg.StrOpt('live_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED',
help='Migration flags to be set for live migration'),
cfg.StrOpt('block_migration_flag',
default='VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, '
'VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, '
'VIR_MIGRATE_NON_SHARED_INC',
help='Migration flags to be set for block migration'),
cfg.IntOpt('live_migration_bandwidth',
default=0,
help='Maximum bandwidth to be used during migration, in Mbps'),
cfg.StrOpt('snapshot_image_format',
help='Snapshot image format (valid options are: '
'raw, qcow2, vmdk, vdi). '
'Defaults to same as source image'),
cfg.StrOpt('disk_prefix',
help='Override the default disk prefix for the devices attached'
' to a server, which is dependent on virt_type. '
'(valid options are: sd, xvd, uvd, vd)'),
cfg.IntOpt('wait_soft_reboot_seconds',
default=120,
help='Number of seconds to wait for instance to shut down after'
' soft reboot request is made. We fall back to hard reboot'
' if instance does not shutdown within this window.'),
cfg.StrOpt('cpu_mode',
help='Set to "host-model" to clone the host CPU feature flags; '
'to "host-passthrough" to use the host CPU model exactly; '
'to "custom" to use a named CPU model; '
'to "none" to not set any CPU model. '
'If virt_type="kvm|qemu", it will default to '
'"host-model", otherwise it will default to "none"'),
cfg.StrOpt('cpu_model',
help='Set to a named libvirt CPU model (see names listed '
'in /usr/share/libvirt/cpu_map.xml). Only has effect if '
'cpu_mode="custom" and virt_type="kvm|qemu"'),
cfg.StrOpt('snapshots_directory',
default='$instances_path/snapshots',
help='Location where libvirt driver will store snapshots '
'before uploading them to image service'),
cfg.StrOpt('xen_hvmloader_path',
default='/usr/lib/xen/boot/hvmloader',
help='Location where the Xen hvmloader is kept'),
cfg.ListOpt('disk_cachemodes',
default=[],
help='Specific cachemodes to use for different disk types '
'e.g: file=directsync,block=none'),
cfg.StrOpt('rng_dev_path',
help='A path to a device that will be used as source of '
'entropy on the host. Permitted options are: '
'/dev/random or /dev/hwrng'),
cfg.ListOpt('hw_machine_type',
help='For qemu or KVM guests, set this option to specify '
'a default machine type per host architecture. '
'You can find a list of supported machine types '
'in your environment by checking the output of '
'the "virsh capabilities"command. The format of the '
'value for this config option is host-arch=machine-type. '
'For example: x86_64=machinetype1,armv7l=machinetype2'),
cfg.StrOpt('sysinfo_serial',
default='auto',
help='The data source used to populate the host "serial" '
'UUID exposed to guest in the virtual BIOS. Permitted '
'options are "hardware", "os", "none" or "auto" '
'(default).'),
cfg.IntOpt('mem_stats_period_seconds',
default=10,
help='Period, in seconds, for collecting memory usage '
'statistics. A zero or negative value disables memory '
'usage statistics.'),
cfg.ListOpt('uid_maps',
default=[],
help='List of uid targets and ranges. '
'Syntax is guest-uid:host-uid:count. '
'Maximum of 5 allowed.'),
cfg.ListOpt('gid_maps',
default=[],
help='List of gid targets and ranges. '
'Syntax is guest-gid:host-gid:count. '
'Maximum of 5 allowed.')
]
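# Illustrative only: these options live under the [libvirt] group of nova.conf
# (registered via CONF.register_opts below). A hypothetical deployment might
# set, for example:
#
#     [libvirt]
#     virt_type = kvm
#     cpu_mode = host-model
#     disk_cachemodes = file=directsync,block=none
#     live_migration_bandwidth = 0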
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('cipher', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('key_size', 'nova.compute.api',
group='ephemeral_storage_encryption')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
CONF.import_opt('server_proxyclient_address', 'nova.spice', group='spice')
CONF.import_opt('vcpu_pin_set', 'nova.virt.hardware')
CONF.import_opt('vif_plugging_is_fatal', 'nova.virt.driver')
CONF.import_opt('vif_plugging_timeout', 'nova.virt.driver')
CONF.import_opt('enabled', 'nova.console.serial', group='serial_console')
CONF.import_opt('proxyclient_address', 'nova.console.serial',
group='serial_console')
CONF.import_opt('hw_disk_discard', 'nova.virt.libvirt.imagebackend',
group='libvirt')
CONF.import_group('workarounds', 'nova.utils')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
libvirt_firewall.__name__,
libvirt_firewall.IptablesFirewallDriver.__name__)
MAX_CONSOLE_BYTES = 100 * units.Ki
# The libvirt driver will prefix any disable reason codes with this string.
DISABLE_PREFIX = 'AUTO: '
# Disable reason for the service which was enabled or disabled without reason
DISABLE_REASON_UNDEFINED = None
# Guest config console string
CONSOLE = "console=tty0 console=ttyS0"
GuestNumaConfig = collections.namedtuple(
'GuestNumaConfig', ['cpuset', 'cputune', 'numaconfig', 'numatune'])
libvirt_volume_drivers = [
'iscsi=nova.virt.libvirt.volume.LibvirtISCSIVolumeDriver',
'iser=nova.virt.libvirt.volume.LibvirtISERVolumeDriver',
'local=nova.virt.libvirt.volume.LibvirtVolumeDriver',
'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver',
'rbd=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'sheepdog=nova.virt.libvirt.volume.LibvirtNetVolumeDriver',
'nfs=nova.virt.libvirt.volume.LibvirtNFSVolumeDriver',
'smbfs=nova.virt.libvirt.volume.LibvirtSMBFSVolumeDriver',
'aoe=nova.virt.libvirt.volume.LibvirtAOEVolumeDriver',
'glusterfs=nova.virt.libvirt.volume.LibvirtGlusterfsVolumeDriver',
'fibre_channel=nova.virt.libvirt.volume.LibvirtFibreChannelVolumeDriver',
'scality=nova.virt.libvirt.volume.LibvirtScalityVolumeDriver',
'gpfs=nova.virt.libvirt.volume.LibvirtGPFSVolumeDriver',
'quobyte=nova.virt.libvirt.volume.LibvirtQuobyteVolumeDriver',
]
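# Each entry maps a connection_info 'driver_volume_type' (e.g. 'iscsi') to the
# volume driver class that handles it; _get_volume_driver() below looks the
# type up in the dict built from this list and raises VolumeDriverNotFound for
# unknown types.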
def patch_tpool_proxy():
"""eventlet.tpool.Proxy doesn't work with old-style class in __str__()
or __repr__() calls. See bug #962840 for details.
We perform a monkey patch to replace those two instance methods.
"""
def str_method(self):
return str(self._obj)
def repr_method(self):
return repr(self._obj)
tpool.Proxy.__str__ = str_method
tpool.Proxy.__repr__ = repr_method
patch_tpool_proxy()
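# After the patch above, str()/repr() on a tpool.Proxy delegate to the wrapped
# object (self._obj), so proxied objects render usefully in log messages.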
VIR_DOMAIN_NOSTATE = 0
VIR_DOMAIN_RUNNING = 1
VIR_DOMAIN_BLOCKED = 2
VIR_DOMAIN_PAUSED = 3
VIR_DOMAIN_SHUTDOWN = 4
VIR_DOMAIN_SHUTOFF = 5
VIR_DOMAIN_CRASHED = 6
VIR_DOMAIN_PMSUSPENDED = 7
LIBVIRT_POWER_STATE = {
VIR_DOMAIN_NOSTATE: power_state.NOSTATE,
VIR_DOMAIN_RUNNING: power_state.RUNNING,
# NOTE(maoy): The DOMAIN_BLOCKED state is only valid in Xen.
# It means that the VM is running and the vCPU is idle. So,
# we map it to RUNNING
VIR_DOMAIN_BLOCKED: power_state.RUNNING,
VIR_DOMAIN_PAUSED: power_state.PAUSED,
# NOTE(maoy): The libvirt API doc says that DOMAIN_SHUTDOWN
# means the domain is being shut down. So technically the domain
# is still running. SHUTOFF is the real powered off state.
# But we will map both to SHUTDOWN anyway.
# http://libvirt.org/html/libvirt-libvirt.html
VIR_DOMAIN_SHUTDOWN: power_state.SHUTDOWN,
VIR_DOMAIN_SHUTOFF: power_state.SHUTDOWN,
VIR_DOMAIN_CRASHED: power_state.CRASHED,
VIR_DOMAIN_PMSUSPENDED: power_state.SUSPENDED,
}
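# This table translates raw libvirt domain states into Nova power_state values;
# it is consulted by the driver (e.g. via _get_power_state()) when reporting
# instance state.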
MIN_LIBVIRT_VERSION = (0, 9, 11)
# When MIN_LIBVIRT_VERSION above matches or exceeds this version,
# delete this constant and the corresponding code that uses it.
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION = (1, 1, 1)
# Live snapshot requirements
REQ_HYPERVISOR_LIVESNAPSHOT = "QEMU"
MIN_LIBVIRT_LIVESNAPSHOT_VERSION = (1, 0, 0)
MIN_QEMU_LIVESNAPSHOT_VERSION = (1, 3, 0)
# block size tuning requirements
MIN_LIBVIRT_BLOCKIO_VERSION = (0, 10, 2)
# BlockJobInfo management requirement
MIN_LIBVIRT_BLOCKJOBINFO_VERSION = (1, 1, 1)
# Relative block commit (feature is detected,
# this version is only used for messaging)
MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION = (1, 2, 7)
# libvirt discard feature
MIN_LIBVIRT_DISCARD_VERSION = (1, 0, 6)
MIN_QEMU_DISCARD_VERSION = (1, 6, 0)
REQ_HYPERVISOR_DISCARD = "QEMU"
# While earlier versions could support NUMA reporting and
# NUMA placement, not until 1.2.7 was there the ability
# to pin guest nodes to host nodes, so mandate that. Without
# this the scheduler cannot make guaranteed decisions, as the
# guest placement may not match what was requested
MIN_LIBVIRT_NUMA_VERSION = (1, 2, 7)
# While earlier versions could support hugepage backed
# guests, not until 1.2.8 was there the ability to request
# a particular huge page size. Without this the scheduler
# cannot make guaranteed decisions, as the huge page size
# used by the guest may not match what was requested
MIN_LIBVIRT_HUGEPAGE_VERSION = (1, 2, 8)
# missing libvirt cpu pinning support
BAD_LIBVIRT_CPU_POLICY_VERSIONS = [(1, 2, 9, 2), (1, 2, 10)]
# fsFreeze/fsThaw requirement
MIN_LIBVIRT_FSFREEZE_VERSION = (1, 2, 5)
# Hyper-V paravirtualized time source
MIN_LIBVIRT_HYPERV_TIMER_VERSION = (1, 2, 2)
MIN_QEMU_HYPERV_TIMER_VERSION = (2, 0, 0)
MIN_LIBVIRT_HYPERV_FEATURE_VERSION = (1, 0, 0)
MIN_LIBVIRT_HYPERV_FEATURE_EXTRA_VERSION = (1, 1, 0)
MIN_QEMU_HYPERV_FEATURE_VERSION = (1, 1, 0)
# parallels driver support
MIN_LIBVIRT_PARALLELS_VERSION = (1, 2, 12)
class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
"supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
super(LibvirtDriver, self).__init__(virtapi)
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._host = host.Host(self.uri(), read_only,
lifecycle_event_handler=self.emit_event,
conn_event_handler=self._handle_conn_event)
self._initiator = None
self._fc_wwnns = None
self._fc_wwpns = None
self._caps = None
self._vcpu_total = 0
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self.virtapi,
host=self._host)
self.vif_driver = libvirt_vif.LibvirtGenericVIFDriver()
self.volume_drivers = driver.driver_dict_from_config(
self._get_volume_drivers(), self)
self._disk_cachemode = None
self.image_cache_manager = imagecache.ImageCacheManager()
self.image_backend = imagebackend.Backend(CONF.use_cow_images)
self.disk_cachemodes = {}
self.valid_cachemodes = ["default",
"none",
"writethrough",
"writeback",
"directsync",
"unsafe",
]
self._conn_supports_start_paused = CONF.libvirt.virt_type in ('kvm',
'qemu')
for mode_str in CONF.libvirt.disk_cachemodes:
disk_type, sep, cache_mode = mode_str.partition('=')
if cache_mode not in self.valid_cachemodes:
LOG.warn(_LW('Invalid cachemode %(cache_mode)s specified '
'for disk type %(disk_type)s.'),
{'cache_mode': cache_mode, 'disk_type': disk_type})
continue
self.disk_cachemodes[disk_type] = cache_mode
self._volume_api = volume.API()
self._image_api = image.API()
self._events_delayed = {}
# Note(toabctl): During a reboot of a Xen domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
if CONF.libvirt.virt_type == "xen":
self._lifecycle_delay = 15
else:
self._lifecycle_delay = 0
sysinfo_serial_funcs = {
'none': lambda: None,
'hardware': self._get_host_sysinfo_serial_hardware,
'os': self._get_host_sysinfo_serial_os,
'auto': self._get_host_sysinfo_serial_auto,
}
self._sysinfo_serial_func = sysinfo_serial_funcs.get(
CONF.libvirt.sysinfo_serial)
if not self._sysinfo_serial_func:
raise exception.NovaException(
_("Unexpected sysinfo_serial setting '%(actual)s'. "
"Permitted values are %(expect)s'") %
{'actual': CONF.libvirt.sysinfo_serial,
'expect': ', '.join("'%s'" % k for k in
sysinfo_serial_funcs.keys())})
def _get_volume_drivers(self):
return libvirt_volume_drivers
@property
def disk_cachemode(self):
if self._disk_cachemode is None:
# We prefer 'none' for consistent performance, host crash
# safety & migration correctness by avoiding host page cache.
# Some filesystems (eg GlusterFS via FUSE) don't support
# O_DIRECT though. For those we fallback to 'writethrough'
# which gives host crash safety, and is safe for migration
# provided the filesystem is cache coherent (cluster filesystems
# typically are, but things like NFS are not).
self._disk_cachemode = "none"
if not self._supports_direct_io(CONF.instances_path):
self._disk_cachemode = "writethrough"
return self._disk_cachemode
def _set_cache_mode(self, conf):
"""Set cache mode on LibvirtConfigGuestDisk object."""
try:
source_type = conf.source_type
driver_cache = conf.driver_cache
except AttributeError:
return
cache_mode = self.disk_cachemodes.get(source_type,
driver_cache)
conf.driver_cache = cache_mode
def _do_quality_warnings(self):
"""Warn about untested driver configurations.
This will log a warning message about untested driver or host arch
configurations to indicate to administrators that the quality is
unknown. Currently, only qemu or kvm on intel 32- or 64-bit systems
is tested upstream.
"""
caps = self._host.get_capabilities()
hostarch = caps.host.cpu.arch
if (CONF.libvirt.virt_type not in ('qemu', 'kvm') or
hostarch not in (arch.I686, arch.X86_64)):
LOG.warn(_LW('The libvirt driver is not tested on '
'%(type)s/%(arch)s by the OpenStack project and '
'thus its quality can not be ensured. For more '
'information, see: https://wiki.openstack.org/wiki/'
'HypervisorSupportMatrix'),
{'type': CONF.libvirt.virt_type, 'arch': hostarch})
def _handle_conn_event(self, enabled, reason):
LOG.info(_LI("Connection event '%(enabled)d' reason '%(reason)s'"),
{'enabled': enabled, 'reason': reason})
self._set_host_enabled(enabled, reason)
def _version_to_string(self, version):
return '.'.join([str(x) for x in version])
def init_host(self, host):
self._host.initialize()
self._do_quality_warnings()
if (CONF.libvirt.virt_type == 'lxc' and
not (CONF.libvirt.uid_maps and CONF.libvirt.gid_maps)):
LOG.warn(_LW("Running libvirt-lxc without user namespaces is "
"dangerous. Containers spawned by Nova will be run "
"as the host's root user. It is highly suggested "
"that user namespaces be used in a public or "
"multi-tenant environment."))
# Stop libguestfs from using KVM unless we're also configured
# to use it. This solves the problem where people need to
# stop Nova from using KVM because nested virt is broken.
if CONF.libvirt.virt_type != "kvm":
guestfs.force_tcg()
if not self._host.has_min_version(MIN_LIBVIRT_VERSION):
raise exception.NovaException(
_('Nova requires libvirt version %s or greater.') %
self._version_to_string(MIN_LIBVIRT_VERSION))
if (CONF.libvirt.virt_type == 'parallels' and
not self._host.has_min_version(MIN_LIBVIRT_PARALLELS_VERSION)):
raise exception.NovaException(
_('Running Nova with parallels virt_type requires '
'libvirt version %s') %
self._version_to_string(MIN_LIBVIRT_PARALLELS_VERSION))
def _get_connection(self):
return self._host.get_connection()
_conn = property(_get_connection)
@staticmethod
def uri():
if CONF.libvirt.virt_type == 'uml':
uri = CONF.libvirt.connection_uri or 'uml:///system'
elif CONF.libvirt.virt_type == 'xen':
uri = CONF.libvirt.connection_uri or 'xen:///'
elif CONF.libvirt.virt_type == 'lxc':
uri = CONF.libvirt.connection_uri or 'lxc:///'
elif CONF.libvirt.virt_type == 'parallels':
uri = CONF.libvirt.connection_uri or 'parallels:///system'
else:
uri = CONF.libvirt.connection_uri or 'qemu:///system'
return uri
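# For example (assuming default settings), virt_type='kvm' with no
# connection_uri override yields 'qemu:///system', while virt_type='xen'
# yields 'xen:///'.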
def instance_exists(self, instance):
"""Efficient override of base instance_exists method."""
try:
self._host.get_domain(instance)
return True
except exception.NovaException:
return False
def list_instances(self):
names = []
for dom in self._host.list_instance_domains(only_running=False):
names.append(dom.name())
return names
def list_instance_uuids(self):
uuids = []
for dom in self._host.list_instance_domains(only_running=False):
uuids.append(dom.UUIDString())
return uuids
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
for vif in network_info:
self.vif_driver.plug(instance, vif)
def _unplug_vifs(self, instance, network_info, ignore_errors):
"""Unplug VIFs from networks."""
for vif in network_info:
try:
self.vif_driver.unplug(instance, vif)
except exception.NovaException:
if not ignore_errors:
raise
def unplug_vifs(self, instance, network_info):
self._unplug_vifs(instance, network_info, False)
def _teardown_container(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
rootfs_dev = instance.system_metadata.get('rootfs_device_name')
disk.teardown_container(container_dir, rootfs_dev)
def _destroy(self, instance):
try:
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
old_domid = -1
if virt_dom is not None:
try:
old_domid = virt_dom.ID()
virt_dom.destroy()
except libvirt.libvirtError as e:
is_okay = False
errcode = e.get_error_code()
if errcode == libvirt.VIR_ERR_NO_DOMAIN:
# Domain already gone. This can safely be ignored.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_INVALID:
# If the instance is already shut off, we get this:
# Code=55 Error=Requested operation is not valid:
# domain is not running
state = self._get_power_state(virt_dom)
if state == power_state.SHUTDOWN:
is_okay = True
elif errcode == libvirt.VIR_ERR_INTERNAL_ERROR:
errmsg = e.get_error_message()
if (CONF.libvirt.virt_type == 'lxc' and
errmsg == 'internal error: '
'Some processes refused to die'):
# Some processes in the container didn't die
# fast enough for libvirt. The container will
# eventually die. For now, move on and let
# the wait_for_destroy logic take over.
is_okay = True
elif errcode == libvirt.VIR_ERR_OPERATION_TIMEOUT:
LOG.warn(_LW("Cannot destroy instance, operation time "
"out"),
instance=instance)
reason = _("operation time out")
raise exception.InstancePowerOffFailure(reason=reason)
if not is_okay:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error from libvirt during destroy. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
def _wait_for_destroy(expected_domid):
"""Called at an interval until the VM is gone."""
# NOTE(vish): If the instance disappears during the destroy
# we ignore it so the cleanup can still be
# attempted because we would prefer destroy to
# never fail.
try:
dom_info = self.get_info(instance)
state = dom_info.state
new_domid = dom_info.id
except exception.InstanceNotFound:
LOG.info(_LI("During wait destroy, instance disappeared."),
instance=instance)
raise loopingcall.LoopingCallDone()
if state == power_state.SHUTDOWN:
LOG.info(_LI("Instance destroyed successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
# NOTE(wangpan): If the instance was booted again after destroy,
# this may become an endless loop, so check the domain
# id here; if it changed and the instance is still
# running, we should destroy it again.
# see https://bugs.launchpad.net/nova/+bug/1111213 for more details
if new_domid != expected_domid:
LOG.info(_LI("Instance may be started again."),
instance=instance)
kwargs['is_running'] = True
raise loopingcall.LoopingCallDone()
kwargs = {'is_running': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy,
old_domid)
timer.start(interval=0.5).wait()
if kwargs['is_running']:
LOG.info(_LI("Going to destroy instance again."),
instance=instance)
self._destroy(instance)
else:
# NOTE(GuanQiang): teardown container to avoid resource leak
if CONF.libvirt.virt_type == 'lxc':
self._teardown_container(instance)
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
self._destroy(instance)
self.cleanup(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
def _undefine_domain(self, instance):
try:
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
virt_dom = None
if virt_dom:
try:
try:
virt_dom.undefineFlags(
libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
except libvirt.libvirtError:
LOG.debug("Error from libvirt during undefineFlags."
" Retrying with undefine", instance=instance)
virt_dom.undefine()
except AttributeError:
# NOTE(vish): Older versions of libvirt don't support
# undefine flags, so attempt to do the
# right thing.
try:
if virt_dom.hasManagedSaveImage(0):
virt_dom.managedSaveRemove(0)
except AttributeError:
pass
virt_dom.undefine()
except libvirt.libvirtError as e:
with excutils.save_and_reraise_exception():
errcode = e.get_error_code()
LOG.error(_LE('Error from libvirt during undefine. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e}, instance=instance)
def cleanup(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None, destroy_vifs=True):
if destroy_vifs:
self._unplug_vifs(instance, network_info, True)
retry = True
while retry:
try:
self.unfilter_instance(instance, network_info)
except libvirt.libvirtError as e:
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
state = power_state.SHUTDOWN
if state != power_state.SHUTDOWN:
LOG.warn(_LW("Instance may be still running, destroy "
"it again."), instance=instance)
self._destroy(instance)
else:
retry = False
errcode = e.get_error_code()
LOG.exception(_LE('Error from libvirt during unfilter. '
'Code=%(errcode)s Error=%(e)s'),
{'errcode': errcode, 'e': e},
instance=instance)
reason = "Error unfiltering instance."
raise exception.InstanceTerminationFailure(reason=reason)
except Exception:
retry = False
raise
else:
retry = False
# FIXME(wangpan): if the instance is booted again here, e.g. by a
# soft reboot operation, it will become
# "running deleted"; should we check and destroy
# it at the end of this method?
# NOTE(vish): we disconnect from volumes regardless
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device']
if disk_dev is not None:
disk_dev = disk_dev.rpartition("/")[2]
if ('data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
try:
self._disconnect_volume(connection_info, disk_dev)
except Exception as exc:
with excutils.save_and_reraise_exception() as ctxt:
if destroy_disks:
# Don't block on Volume errors if we're trying to
# delete the instance as we may be partially created
# or deleted
ctxt.reraise = False
LOG.warn(_LW("Ignoring Volume Error on vol %(vol_id)s "
"during delete %(exc)s"),
{'vol_id': vol.get('volume_id'), 'exc': exc},
instance=instance)
if destroy_disks:
# NOTE(haomai): destroy volumes if needed
if CONF.libvirt.images_type == 'lvm':
self._cleanup_lvm(instance)
if CONF.libvirt.images_type == 'rbd':
self._cleanup_rbd(instance)
if destroy_disks or (
migrate_data and migrate_data.get('is_shared_block_storage',
False)):
attempts = int(instance.system_metadata.get('clean_attempts',
'0'))
success = self.delete_instance_files(instance)
# NOTE(mriedem): This is used in the _run_pending_deletes periodic
# task in the compute manager. The tight coupling is not great...
instance.system_metadata['clean_attempts'] = str(attempts + 1)
if success:
instance.cleaned = True
instance.save()
if CONF.serial_console.enabled:
serials = self._get_serial_ports_from_instance(instance)
for hostname, port in serials:
serial_console.release_port(host=hostname, port=port)
self._undefine_domain(instance)
def _detach_encrypted_volumes(self, instance):
"""Detaches encrypted volumes attached to instance."""
disks = jsonutils.loads(self.get_instance_disk_info(instance))
encrypted_volumes = filter(dmcrypt.is_encrypted,
[disk['path'] for disk in disks])
for path in encrypted_volumes:
dmcrypt.delete_volume(path)
def _get_serial_ports_from_instance(self, instance, mode=None):
"""Returns an iterator over serial port(s) configured on instance.
:param mode: Should be a value in (None, bind, connect)
"""
virt_dom = self._host.get_domain(instance)
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
# The 'serial' device is the base for x86 platforms. Other platforms
# (e.g. kvm on system z = arch.S390X) can only use 'console' devices.
xpath_mode = "[@mode='%s']" % mode if mode else ""
serial_tcp = "./devices/serial[@type='tcp']/source" + xpath_mode
console_tcp = "./devices/console[@type='tcp']/source" + xpath_mode
tcp_devices = tree.findall(serial_tcp)
if len(tcp_devices) == 0:
tcp_devices = tree.findall(console_tcp)
for source in tcp_devices:
yield (source.get("host"), int(source.get("service")))
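# Illustrative guest XML matched above (assumed example): a device such as
#   <serial type='tcp'><source mode='bind' host='127.0.0.1' service='10000'/></serial>
# would be yielded as ('127.0.0.1', 10000).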
@staticmethod
def _get_rbd_driver():
return rbd_utils.RBDDriver(
pool=CONF.libvirt.images_rbd_pool,
ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
rbd_user=CONF.libvirt.rbd_user)
def _cleanup_rbd(self, instance):
LibvirtDriver._get_rbd_driver().cleanup_volumes(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""
if instance.get('ephemeral_key_uuid') is not None:
self._detach_encrypted_volumes(instance)
disks = self._lvm_disks(instance)
if disks:
lvm.remove_volumes(disks)
def _lvm_disks(self, instance):
"""Returns all LVM disks for given instance object."""
if CONF.libvirt.images_volume_group:
vg = os.path.join('/dev', CONF.libvirt.images_volume_group)
if not os.path.exists(vg):
return []
pattern = '%s_' % instance.uuid
def belongs_to_instance(disk):
return disk.startswith(pattern)
def fullpath(name):
return os.path.join(vg, name)
logical_volumes = lvm.list_volumes(vg)
disk_names = filter(belongs_to_instance, logical_volumes)
disks = map(fullpath, disk_names)
return disks
return []
def get_volume_connector(self, instance):
if self._initiator is None:
self._initiator = libvirt_utils.get_iscsi_initiator()
if not self._initiator:
LOG.debug('Could not determine iscsi initiator name',
instance=instance)
if self._fc_wwnns is None:
self._fc_wwnns = libvirt_utils.get_fc_wwnns()
if not self._fc_wwnns or len(self._fc_wwnns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide node names',
instance=instance)
if self._fc_wwpns is None:
self._fc_wwpns = libvirt_utils.get_fc_wwpns()
if not self._fc_wwpns or len(self._fc_wwpns) == 0:
LOG.debug('Could not determine fibre channel '
'world wide port names',
instance=instance)
connector = {'ip': CONF.my_block_storage_ip,
'host': CONF.host}
if self._initiator:
connector['initiator'] = self._initiator
if self._fc_wwnns and self._fc_wwpns:
connector["wwnns"] = self._fc_wwnns
connector["wwpns"] = self._fc_wwpns
return connector
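# Sketch of the returned connector dict (keys depend on what was detected):
#   {'ip': ..., 'host': ..., 'initiator': <iSCSI IQN>,
#    'wwnns': [...], 'wwpns': [...]}
# The iSCSI and Fibre Channel keys are only present when they could be
# determined on this host.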
def _cleanup_resize(self, instance, network_info):
# NOTE(wangpan): we get the pre-grizzly instance path first,
# so the backup dir of a pre-grizzly instance can
# be deleted correctly by grizzly or later nova.
pre_grizzly_name = libvirt_utils.get_instance_path(instance,
forceold=True)
target = pre_grizzly_name + '_resize'
if not os.path.exists(target):
target = libvirt_utils.get_instance_path(instance) + '_resize'
if os.path.exists(target):
# Deletion can fail over NFS, so retry the deletion as required.
# Set the maximum number of attempts to 5; in most cases the
# directory is removed by the second attempt.
utils.execute('rm', '-rf', target, delay_on_retry=True,
attempts=5)
if instance.host != CONF.host:
self._undefine_domain(instance)
self.unplug_vifs(instance, network_info)
self.unfilter_instance(instance, network_info)
def _get_volume_driver(self, connection_info):
driver_type = connection_info.get('driver_volume_type')
if driver_type not in self.volume_drivers:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
return self.volume_drivers[driver_type]
def _connect_volume(self, connection_info, disk_info):
driver = self._get_volume_driver(connection_info)
driver.connect_volume(connection_info, disk_info)
def _disconnect_volume(self, connection_info, disk_dev):
driver = self._get_volume_driver(connection_info)
return driver.disconnect_volume(connection_info, disk_dev)
def _get_volume_config(self, connection_info, disk_info):
driver = self._get_volume_driver(connection_info)
return driver.get_config(connection_info, disk_info)
def _get_volume_encryptor(self, connection_info, encryption):
encryptor = encryptors.get_volume_encryptor(connection_info,
**encryption)
return encryptor
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
virt_dom = self._host.get_domain(instance)
disk_dev = mountpoint.rpartition("/")[2]
bdm = {
'device_name': disk_dev,
'disk_bus': disk_bus,
'device_type': device_type}
# Note(cfb): If the volume has a custom block size, check that
# we are using QEMU/KVM and libvirt >= 0.10.2. The
# presence of a block size is considered mandatory by
# cinder so we fail if we can't honor the request.
data = {}
if ('data' in connection_info):
data = connection_info['data']
if ('logical_block_size' in data or 'physical_block_size' in data):
if CONF.libvirt.virt_type not in ("kvm", "qemu"):
msg = _("Volume sets block size, but the current "
"libvirt hypervisor '%s' does not support custom "
"block size") % CONF.libvirt.virt_type
raise exception.InvalidHypervisorType(msg)
if not self._host.has_min_version(MIN_LIBVIRT_BLOCKIO_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_BLOCKIO_VERSION])
msg = _("Volume sets block size, but libvirt '%s' or later is "
"required.") % ver
raise exception.Invalid(msg)
disk_info = blockinfo.get_info_from_bdm(CONF.libvirt.virt_type,
image_meta, bdm)
self._connect_volume(connection_info, disk_info)
conf = self._get_volume_config(connection_info, disk_info)
self._set_cache_mode(conf)
try:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = self._get_power_state(virt_dom)
if state in (power_state.RUNNING, power_state.PAUSED):
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
virt_dom.attachDeviceFlags(conf.to_xml(), flags)
except Exception as ex:
LOG.exception(_LE('Failed to attach volume at mountpoint: %s'),
mountpoint, instance=instance)
if isinstance(ex, libvirt.libvirtError):
errcode = ex.get_error_code()
if errcode == libvirt.VIR_ERR_OPERATION_FAILED:
self._disconnect_volume(connection_info, disk_dev)
raise exception.DeviceIsBusy(device=disk_dev)
with excutils.save_and_reraise_exception():
self._disconnect_volume(connection_info, disk_dev)
def _swap_volume(self, domain, disk_path, new_path, resize_to):
"""Swap existing disk with a new block device."""
# Save a copy of the domain's persistent XML file
xml = domain.XMLDesc(
libvirt.VIR_DOMAIN_XML_INACTIVE |
libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# Start copy with VIR_DOMAIN_REBASE_REUSE_EXT flag to
# allow writing to existing external volume file
domain.blockRebase(disk_path, new_path, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path,
libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT)
if resize_to:
# NOTE(alex_xu): domain.blockJobAbort isn't a synchronous call;
# this is a bug in libvirt, so we need to wait until
# the pivot has finished. libvirt bug #1119173
while self._wait_for_block_job(domain, disk_path,
wait_for_job_clean=True):
time.sleep(0.5)
domain.blockResize(disk_path, resize_to * units.Gi / units.Ki)
finally:
self._conn.defineXML(xml)
def swap_volume(self, old_connection_info,
new_connection_info, instance, mountpoint, resize_to):
virt_dom = self._host.get_domain(instance)
disk_dev = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
disk_info = {
'dev': disk_dev,
'bus': blockinfo.get_disk_bus_for_disk_dev(
CONF.libvirt.virt_type, disk_dev),
'type': 'disk',
}
self._connect_volume(new_connection_info, disk_info)
conf = self._get_volume_config(new_connection_info, disk_info)
if not conf.source_path:
self._disconnect_volume(new_connection_info, disk_dev)
raise NotImplementedError(_("Swap only supports host devices"))
# Save updates made in connection_info when connect_volume was called
volume_id = new_connection_info.get('serial')
bdm = objects.BlockDeviceMapping.get_by_volume_id(
nova_context.get_admin_context(), volume_id)
driver_bdm = driver_block_device.DriverVolumeBlockDevice(bdm)
driver_bdm['connection_info'] = new_connection_info
driver_bdm.save()
self._swap_volume(virt_dom, disk_dev, conf.source_path, resize_to)
self._disconnect_volume(old_connection_info, disk_dev)
@staticmethod
def _get_disk_xml(xml, device):
"""Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
return None
node = doc.find("./devices/disk/target[@dev='%s'].." % device)
if node is not None:
return etree.tostring(node)
def _get_existing_domain_xml(self, instance, network_info,
block_device_info=None):
try:
virt_dom = self._host.get_domain(instance)
xml = virt_dom.XMLDesc(0)
except exception.InstanceNotFound:
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
xml = self._get_guest_xml(nova_context.get_admin_context(),
instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info)
return xml
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
disk_dev = mountpoint.rpartition("/")[2]
try:
virt_dom = self._host.get_domain(instance)
xml = self._get_disk_xml(virt_dom.XMLDesc(0), disk_dev)
if not xml:
raise exception.DiskNotFound(location=disk_dev)
else:
# NOTE(vish): We can always affect config because our
# domains are persistent, but we should only
# affect live if the domain is running.
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = self._get_power_state(virt_dom)
if state in (power_state.RUNNING, power_state.PAUSED):
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(xml, flags)
if encryption:
# The volume must be detached from the VM before
# disconnecting it from its encryptor. Otherwise, the
# encryptor may report that the volume is still in use.
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.detach_volume(**encryption)
except exception.InstanceNotFound:
# NOTE(zhaoqin): If the instance does not exist, _lookup_by_name()
# will throw InstanceNotFound exception. Need to
# disconnect volume under this circumstance.
LOG.warn(_LW("During detach_volume, instance disappeared."))
except libvirt.libvirtError as ex:
# NOTE(vish): This is called to cleanup volumes after live
# migration, so we should still disconnect even if
# the instance doesn't exist here anymore.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
# NOTE(vish):
LOG.warn(_LW("During detach_volume, instance disappeared."))
else:
raise
self._disconnect_volume(connection_info, disk_dev)
def attach_interface(self, instance, image_meta, vif):
virt_dom = self._host.get_domain(instance)
self.vif_driver.plug(instance, vif)
self.firewall_driver.setup_basic_filtering(instance, [vif])
cfg = self.vif_driver.get_config(instance, vif, image_meta,
instance.flavor,
CONF.libvirt.virt_type)
try:
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = self._get_power_state(virt_dom)
if state == power_state.RUNNING or state == power_state.PAUSED:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.attachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError:
LOG.error(_LE('attaching network adapter failed.'),
instance=instance, exc_info=True)
self.vif_driver.unplug(instance, vif)
raise exception.InterfaceAttachFailed(
instance_uuid=instance.uuid)
def detach_interface(self, instance, vif):
virt_dom = self._host.get_domain(instance)
cfg = self.vif_driver.get_config(instance, vif, None, instance.flavor,
CONF.libvirt.virt_type)
try:
self.vif_driver.unplug(instance, vif)
flags = libvirt.VIR_DOMAIN_AFFECT_CONFIG
state = self._get_power_state(virt_dom)
if state == power_state.RUNNING or state == power_state.PAUSED:
flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE
virt_dom.detachDeviceFlags(cfg.to_xml(), flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("During detach_interface, "
"instance disappeared."),
instance=instance)
else:
LOG.error(_LE('detaching network adapter failed.'),
instance=instance, exc_info=True)
raise exception.InterfaceDetachFailed(
instance_uuid=instance.uuid)
def _create_snapshot_metadata(self, base, instance, img_fmt, snp_name):
metadata = {'is_public': False,
'status': 'active',
'name': snp_name,
'properties': {
'kernel_id': instance.kernel_id,
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
'ramdisk_id': instance.ramdisk_id,
}
}
if instance.os_type:
metadata['properties']['os_type'] = instance.os_type
# NOTE(vish): glance forces ami disk format to be ami
if base.get('disk_format') == 'ami':
metadata['disk_format'] = 'ami'
else:
metadata['disk_format'] = img_fmt
metadata['container_format'] = base.get('container_format', 'bare')
return metadata
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+
"""
try:
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
base_image_ref = instance.image_ref
base = compute_utils.get_image_metadata(
context, self._image_api, base_image_ref, instance)
snapshot = self._image_api.get(context, image_id)
disk_path = libvirt_utils.find_disk(virt_dom)
source_format = libvirt_utils.get_disk_type(disk_path)
image_format = CONF.libvirt.snapshot_image_format or source_format
# NOTE(bfilippov): save lvm and rbd as raw
if image_format == 'lvm' or image_format == 'rbd':
image_format = 'raw'
metadata = self._create_snapshot_metadata(base,
instance,
image_format,
snapshot['name'])
snapshot_name = uuid.uuid4().hex
state = self._get_power_state(virt_dom)
# NOTE(rmk): Live snapshots require QEMU 1.3 and Libvirt 1.0.0.
# These restrictions can be relaxed as other configurations
# can be validated.
# NOTE(dgenin): Instances with LVM encrypted ephemeral storage require
# cold snapshots. Currently, checking for encryption is
# redundant because LVM supports only cold snapshots.
# It is necessary in case this situation changes in the
# future.
if (self._host.has_min_version(MIN_LIBVIRT_LIVESNAPSHOT_VERSION,
MIN_QEMU_LIVESNAPSHOT_VERSION,
REQ_HYPERVISOR_LIVESNAPSHOT)
and source_format not in ('lvm', 'rbd')
and not CONF.ephemeral_storage_encryption.enabled
and not CONF.workarounds.disable_libvirt_livesnapshot):
live_snapshot = True
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended. This operation also
# confirms the running instance, as opposed to the system as a
# whole, has a new enough version of the hypervisor (bug 1193146).
try:
virt_dom.blockJobAbort(disk_path, 0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
live_snapshot = False
else:
pass
else:
live_snapshot = False
# NOTE(rmk): We cannot perform live snapshots when a managedSave
# file is present, so we will use the cold/legacy method
# for instances which are shutdown.
if state == power_state.SHUTDOWN:
live_snapshot = False
# NOTE(dkang): managedSave does not work for LXC
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING or state == power_state.PAUSED:
self._detach_pci_devices(virt_dom,
pci_manager.get_instance_pci_devs(instance))
self._detach_sriov_ports(context, instance, virt_dom)
virt_dom.managedSave(0)
snapshot_backend = self.image_backend.snapshot(instance,
disk_path,
image_type=source_format)
if live_snapshot:
LOG.info(_LI("Beginning live snapshot process"),
instance=instance)
else:
LOG.info(_LI("Beginning cold snapshot process"),
instance=instance)
update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
snapshot_directory = CONF.libvirt.snapshots_directory
fileutils.ensure_tree(snapshot_directory)
with utils.tempdir(dir=snapshot_directory) as tmpdir:
try:
out_path = os.path.join(tmpdir, snapshot_name)
if live_snapshot:
# NOTE(xqueralt): libvirt needs o+x in the temp directory
os.chmod(tmpdir, 0o701)
self._live_snapshot(context, instance, virt_dom, disk_path,
out_path, image_format, base)
else:
snapshot_backend.snapshot_extract(out_path, image_format)
finally:
new_dom = None
# NOTE(dkang): because previous managedSave is not called
# for LXC, _create_domain must not be called.
if CONF.libvirt.virt_type != 'lxc' and not live_snapshot:
if state == power_state.RUNNING:
new_dom = self._create_domain(domain=virt_dom)
elif state == power_state.PAUSED:
new_dom = self._create_domain(domain=virt_dom,
launch_flags=libvirt.VIR_DOMAIN_START_PAUSED)
if new_dom is not None:
self._attach_pci_devices(new_dom,
pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, new_dom)
LOG.info(_LI("Snapshot extracted, beginning image upload"),
instance=instance)
# Upload that image to the image service
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
with libvirt_utils.file_open(out_path) as image_file:
self._image_api.update(context,
image_id,
metadata,
image_file)
LOG.info(_LI("Snapshot image upload complete"),
instance=instance)
@staticmethod
def _wait_for_block_job(domain, disk_path, abort_on_error=False,
wait_for_job_clean=False):
"""Wait for libvirt block job to complete.
Libvirt may return either cur==end or an empty dict when
the job is complete, depending on whether the job has been
cleaned up by libvirt yet, or not.
:returns: True if still in progress
False if completed
"""
status = domain.blockJobInfo(disk_path, 0)
if status == -1 and abort_on_error:
msg = _('libvirt error while requesting blockjob info.')
raise exception.NovaException(msg)
try:
cur = status.get('cur', 0)
end = status.get('end', 0)
except Exception:
return False
if wait_for_job_clean:
job_ended = not status
else:
job_ended = cur == end
return not job_ended
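# Callers in this module poll this helper in a loop, e.g.:
#     while self._wait_for_block_job(domain, disk_path):
#         time.sleep(0.5)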
def _can_quiesce(self, image_meta):
if CONF.libvirt.virt_type not in ('kvm', 'qemu'):
return (False, _('Only KVM and QEMU are supported'))
if not self._host.has_min_version(MIN_LIBVIRT_FSFREEZE_VERSION):
ver = ".".join([str(x) for x in MIN_LIBVIRT_FSFREEZE_VERSION])
return (False, _('Quiescing requires libvirt version %(version)s '
'or greater') % {'version': ver})
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', '')
if not strutils.bool_from_string(hw_qga):
return (False, _('QEMU guest agent is not enabled'))
return (True, None)
def _set_quiesced(self, context, instance, image_meta, quiesced):
supported, reason = self._can_quiesce(image_meta)
if not supported:
raise exception.InstanceQuiesceNotSupported(
instance_id=instance.uuid, reason=reason)
try:
domain = self._host.get_domain(instance)
if quiesced:
domain.fsFreeze()
else:
domain.fsThaw()
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while quiescing %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s')
% {'instance_name': instance.name,
'error_code': error_code, 'ex': ex})
raise exception.NovaException(msg)
def quiesce(self, context, instance, image_meta):
"""Freeze the guest filesystems to prepare for snapshot.
The qemu-guest-agent must be setup to execute fsfreeze.
"""
self._set_quiesced(context, instance, image_meta, True)
def unquiesce(self, context, instance, image_meta):
"""Thaw the guest filesystems after snapshot."""
self._set_quiesced(context, instance, image_meta, False)
def _live_snapshot(self, context, instance, domain, disk_path, out_path,
image_format, image_meta):
"""Snapshot an instance without downtime."""
# Save a copy of the domain's persistent XML file
xml = domain.XMLDesc(
libvirt.VIR_DOMAIN_XML_INACTIVE |
libvirt.VIR_DOMAIN_XML_SECURE)
# Abort is an idempotent operation, so make sure any block
# jobs which may have failed are ended.
try:
domain.blockJobAbort(disk_path, 0)
except Exception:
pass
# NOTE (rmk): We are using shallow rebases as a workaround to a bug
# in QEMU 1.3. In order to do this, we need to create
# a destination image with the original backing file
# and matching size of the instance root disk.
src_disk_size = libvirt_utils.get_disk_size(disk_path)
src_back_path = libvirt_utils.get_disk_backing_file(disk_path,
basename=False)
disk_delta = out_path + '.delta'
libvirt_utils.create_cow_image(src_back_path, disk_delta,
src_disk_size)
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
require_quiesce = strutils.bool_from_string(
img_meta_prop.get('os_require_quiesce', ''))
if require_quiesce:
self.quiesce(context, instance, image_meta)
try:
# NOTE (rmk): blockRebase cannot be executed on persistent
# domains, so we need to temporarily undefine it.
# If any part of this block fails, the domain is
# re-defined regardless.
if domain.isPersistent():
domain.undefine()
# NOTE (rmk): Establish a temporary mirror of our root disk and
# issue an abort once we have a complete copy.
domain.blockRebase(disk_path, disk_delta, 0,
libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
while self._wait_for_block_job(domain, disk_path):
time.sleep(0.5)
domain.blockJobAbort(disk_path, 0)
libvirt_utils.chown(disk_delta, os.getuid())
finally:
self._conn.defineXML(xml)
if require_quiesce:
self.unquiesce(context, instance, image_meta)
# Convert the delta (CoW) image with a backing file to a flat
# image with no backing file.
libvirt_utils.extract_snapshot(disk_delta, 'qcow2',
out_path, image_format)
def _volume_snapshot_update_status(self, context, snapshot_id, status):
"""Send a snapshot status update to Cinder.
This method captures and logs exceptions that occur
since callers cannot do anything useful with these exceptions.
Operations on the Cinder side waiting for this will time out if
a failure occurs sending the update.
:param context: security context
:param snapshot_id: id of snapshot being updated
:param status: new status value
"""
try:
self._volume_api.update_snapshot_status(context,
snapshot_id,
status)
except Exception:
LOG.exception(_LE('Failed to send updated snapshot status '
'to volume service.'))
def _volume_snapshot_create(self, context, instance, domain,
volume_id, new_file):
"""Perform volume snapshot.
:param domain: VM that volume is attached to
:param volume_id: volume UUID to snapshot
:param new_file: relative path to new qcow2 file present on share
"""
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
disks_to_snap = [] # to be snapshotted by libvirt
network_disks_to_snap = [] # network disks (netfs, gluster, etc.)
disks_to_skip = [] # local disks not snapshotted
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None):
continue
if (guest_disk.serial is None or guest_disk.serial != volume_id):
disks_to_skip.append(guest_disk.target_dev)
continue
# disk is a Cinder volume with the correct volume_id
disk_info = {
'dev': guest_disk.target_dev,
'serial': guest_disk.serial,
'current_file': guest_disk.source_path,
'source_protocol': guest_disk.source_protocol,
'source_name': guest_disk.source_name,
'source_hosts': guest_disk.source_hosts,
'source_ports': guest_disk.source_ports
}
# Determine path for new_file based on current path
if disk_info['current_file'] is not None:
current_file = disk_info['current_file']
new_file_path = os.path.join(os.path.dirname(current_file),
new_file)
disks_to_snap.append((current_file, new_file_path))
elif disk_info['source_protocol'] in ('gluster', 'netfs'):
network_disks_to_snap.append((disk_info, new_file))
if not disks_to_snap and not network_disks_to_snap:
msg = _('Found no disk to snapshot.')
raise exception.NovaException(msg)
snapshot = vconfig.LibvirtConfigGuestSnapshot()
for current_name, new_filename in disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = current_name
snap_disk.source_path = new_filename
snap_disk.source_type = 'file'
snap_disk.snapshot = 'external'
snap_disk.driver_name = 'qcow2'
snapshot.add_disk(snap_disk)
for disk_info, new_filename in network_disks_to_snap:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = disk_info['dev']
snap_disk.source_type = 'network'
snap_disk.source_protocol = disk_info['source_protocol']
snap_disk.snapshot = 'external'
snap_disk.source_path = new_filename
old_dir = disk_info['source_name'].split('/')[0]
snap_disk.source_name = '%s/%s' % (old_dir, new_filename)
snap_disk.source_hosts = disk_info['source_hosts']
snap_disk.source_ports = disk_info['source_ports']
snapshot.add_disk(snap_disk)
for dev in disks_to_skip:
snap_disk = vconfig.LibvirtConfigGuestSnapshotDisk()
snap_disk.name = dev
snap_disk.snapshot = 'no'
snapshot.add_disk(snap_disk)
snapshot_xml = snapshot.to_xml()
LOG.debug("snap xml: %s", snapshot_xml)
snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
QUIESCE = libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE
try:
domain.snapshotCreateXML(snapshot_xml,
snap_flags | QUIESCE)
return
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create quiesced VM snapshot, '
'attempting again with quiescing disabled.'))
try:
domain.snapshotCreateXML(snapshot_xml, snap_flags)
except libvirt.libvirtError:
LOG.exception(_LE('Unable to create VM snapshot, '
'failing volume_snapshot operation.'))
raise
def _volume_refresh_connection_info(self, context, instance, volume_id):
bdm = objects.BlockDeviceMapping.get_by_volume_id(context,
volume_id)
driver_bdm = driver_block_device.convert_volume(bdm)
if driver_bdm:
driver_bdm.refresh_connection_info(context, instance,
self._volume_api, self)
def volume_snapshot_create(self, context, instance, volume_id,
create_info):
"""Create snapshots of a Cinder volume via libvirt.
:param instance: VM instance object reference
:param volume_id: id of volume being snapshotted
:param create_info: dict of information used to create snapshots
- snapshot_id : ID of snapshot
- type : qcow2 / <other>
- new_file : qcow2 file created by Cinder which
becomes the VM's active image after
the snapshot is complete
"""
LOG.debug("volume_snapshot_create: create_info: %(c_info)s",
{'c_info': create_info}, instance=instance)
try:
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
if create_info['type'] != 'qcow2':
raise exception.NovaException(_('Unknown type: %s') %
create_info['type'])
snapshot_id = create_info.get('snapshot_id', None)
if snapshot_id is None:
raise exception.NovaException(_('snapshot_id required '
'in create_info'))
try:
self._volume_snapshot_create(context, instance, virt_dom,
volume_id, create_info['new_file'])
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_create, '
'sending error status to Cinder.'))
self._volume_snapshot_update_status(
context, snapshot_id, 'error')
self._volume_snapshot_update_status(
context, snapshot_id, 'creating')
def _wait_for_snapshot():
snapshot = self._volume_api.get_snapshot(context, snapshot_id)
if snapshot.get('status') != 'creating':
self._volume_refresh_connection_info(context, instance,
volume_id)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot)
timer.start(interval=0.5).wait()
def _volume_snapshot_delete(self, context, instance, volume_id,
snapshot_id, delete_info=None):
"""Note:
if file being merged into == active image:
do a blockRebase (pull) operation
else:
do a blockCommit operation
Files must be adjacent in snap chain.
:param instance: instance object reference
:param volume_id: volume UUID
:param snapshot_id: snapshot UUID (unused currently)
:param delete_info: {
'type': 'qcow2',
'file_to_merge': 'a.img',
'merge_target_file': 'b.img' or None (if merging file_to_merge into
active image)
}
Libvirt blockjob handling required for this method is broken
in versions of libvirt that do not contain:
http://libvirt.org/git/?p=libvirt.git;h=0f9e67bfad (1.1.1)
(Patch is pending in 1.0.5-maint branch as well, but we cannot detect
libvirt 1.0.5.5 vs. 1.0.5.6 here.)
"""
if not self._host.has_min_version(MIN_LIBVIRT_BLOCKJOBINFO_VERSION):
ver = '.'.join([str(x) for x in MIN_LIBVIRT_BLOCKJOBINFO_VERSION])
msg = _("Libvirt '%s' or later is required for online deletion "
"of volume snapshots.") % ver
raise exception.Invalid(msg)
LOG.debug('volume_snapshot_delete: delete_info: %s', delete_info)
if delete_info['type'] != 'qcow2':
msg = _('Unknown delete_info type %s') % delete_info['type']
raise exception.NovaException(msg)
try:
virt_dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
raise exception.InstanceNotRunning(instance_id=instance.uuid)
# Find dev name
my_dev = None
active_disk = None
xml = virt_dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
device_info = vconfig.LibvirtConfigGuest()
device_info.parse_dom(xml_doc)
active_disk_object = None
for guest_disk in device_info.devices:
if (guest_disk.root_name != 'disk'):
continue
if (guest_disk.target_dev is None or guest_disk.serial is None):
continue
if guest_disk.serial == volume_id:
my_dev = guest_disk.target_dev
active_disk = guest_disk.source_path
active_protocol = guest_disk.source_protocol
active_disk_object = guest_disk
break
if my_dev is None or (active_disk is None and active_protocol is None):
msg = _('Disk with id: %s '
'not found attached to instance.') % volume_id
LOG.debug('Domain XML: %s', xml)
raise exception.NovaException(msg)
LOG.debug("found device at %s", my_dev)
def _get_snap_dev(filename, backing_store):
if filename is None:
msg = _('filename cannot be None')
raise exception.NovaException(msg)
# libgfapi delete
LOG.debug("XML: %s" % xml)
LOG.debug("active disk object: %s" % active_disk_object)
# determine reference within backing store for desired image
filename_to_merge = filename
matched_name = None
b = backing_store
index = None
current_filename = active_disk_object.source_name.split('/')[1]
if current_filename == filename_to_merge:
return my_dev + '[0]'
while b is not None:
source_filename = b.source_name.split('/')[1]
if source_filename == filename_to_merge:
LOG.debug('found match: %s' % b.source_name)
matched_name = b.source_name
index = b.index
break
b = b.backing_store
if matched_name is None:
msg = _('no match found for %s') % (filename_to_merge)
raise exception.NovaException(msg)
LOG.debug('index of match (%s) is %s' % (b.source_name, index))
my_snap_dev = '%s[%s]' % (my_dev, index)
return my_snap_dev
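# The 'dev[index]' form returned here (e.g. 'vda[1]') is libvirt's indexed-name
# notation for addressing an entry in a disk's backing chain, which is what the
# blockRebase()/blockCommit() calls below expect for network (libgfapi) disks.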
if delete_info['merge_target_file'] is None:
# pull via blockRebase()
# Merge the most recent snapshot into the active image
rebase_disk = my_dev
rebase_flags = 0
rebase_base = delete_info['file_to_merge'] # often None
if active_protocol is not None:
rebase_base = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
rebase_bw = 0
LOG.debug('disk: %(disk)s, base: %(base)s, '
'bw: %(bw)s, flags: %(flags)s',
{'disk': rebase_disk,
'base': rebase_base,
'bw': rebase_bw,
'flags': rebase_flags})
result = virt_dom.blockRebase(rebase_disk, rebase_base,
rebase_bw, rebase_flags)
if result == 0:
LOG.debug('blockRebase started successfully')
while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockRebase job completion')
time.sleep(0.5)
else:
# commit with blockCommit()
my_snap_base = None
my_snap_top = None
commit_disk = my_dev
commit_flags = 0
if active_protocol is not None:
my_snap_base = _get_snap_dev(delete_info['merge_target_file'],
active_disk_object.backing_store)
my_snap_top = _get_snap_dev(delete_info['file_to_merge'],
active_disk_object.backing_store)
try:
commit_flags |= libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
except AttributeError:
ver = '.'.join(
[str(x) for x in
MIN_LIBVIRT_BLOCKCOMMIT_RELATIVE_VERSION])
msg = _("Relative blockcommit support was not detected. "
"Libvirt '%s' or later is required for online "
"deletion of network storage-backed volume "
"snapshots.") % ver
raise exception.Invalid(msg)
commit_base = my_snap_base or delete_info['merge_target_file']
commit_top = my_snap_top or delete_info['file_to_merge']
bandwidth = 0
LOG.debug('will call blockCommit with commit_disk=%(commit_disk)s '
'commit_base=%(commit_base)s '
'commit_top=%(commit_top)s '
% {'commit_disk': commit_disk,
'commit_base': commit_base,
'commit_top': commit_top})
result = virt_dom.blockCommit(commit_disk, commit_base, commit_top,
bandwidth, commit_flags)
if result == 0:
LOG.debug('blockCommit started successfully')
while self._wait_for_block_job(virt_dom, my_dev,
abort_on_error=True):
LOG.debug('waiting for blockCommit job completion')
time.sleep(0.5)
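    # NOTE: illustrative sketch (not part of the driver) of the device-index
    # notation _get_snap_dev() builds for network-backed disks; the chain
    # below and its index values are hypothetical:
    #
    #   active image:  vol/a3f1.img   -> referenced as 'vda[0]'
    #   snapshot:      vol/b7c2.img   -> index 1 -> 'vda[1]'
    #   base image:    vol/9d04.img   -> index 2 -> 'vda[2]'
    #
    # blockRebase()/blockCommit() then receive these 'dev[index]' names
    # (together with VIR_DOMAIN_BLOCK_COMMIT_RELATIVE for commits) instead
    # of plain file paths, since protocol-backed disks have no local path.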
def volume_snapshot_delete(self, context, instance, volume_id, snapshot_id,
delete_info):
try:
self._volume_snapshot_delete(context, instance, volume_id,
snapshot_id, delete_info=delete_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE('Error occurred during '
'volume_snapshot_delete, '
'sending error status to Cinder.'))
self._volume_snapshot_update_status(
context, snapshot_id, 'error_deleting')
self._volume_snapshot_update_status(context, snapshot_id, 'deleting')
self._volume_refresh_connection_info(context, instance, volume_id)
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot a virtual machine, given an instance reference."""
if reboot_type == 'SOFT':
# NOTE(vish): This will attempt to do a graceful shutdown/restart.
try:
soft_reboot_success = self._soft_reboot(instance)
except libvirt.libvirtError as e:
LOG.debug("Instance soft reboot failed: %s", e)
soft_reboot_success = False
if soft_reboot_success:
LOG.info(_LI("Instance soft rebooted successfully."),
instance=instance)
return
else:
LOG.warn(_LW("Failed to soft reboot instance. "
"Trying hard reboot."),
instance=instance)
return self._hard_reboot(context, instance, network_info,
block_device_info)
def _soft_reboot(self, instance):
"""Attempt to shutdown and restart the instance gracefully.
We use shutdown and create here so we can return if the guest
responded and actually rebooted. Note that this method only
        succeeds if the guest responds to ACPI. Therefore we return
success or failure so we can fall back to a hard reboot if
necessary.
:returns: True if the reboot succeeded
"""
dom = self._host.get_domain(instance)
state = self._get_power_state(dom)
old_domid = dom.ID()
# NOTE(vish): This check allows us to reboot an instance that
# is already shutdown.
if state == power_state.RUNNING:
dom.shutdown()
# NOTE(vish): This actually could take slightly longer than the
# FLAG defines depending on how long the get_info
# call takes to return.
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
for x in xrange(CONF.libvirt.wait_soft_reboot_seconds):
dom = self._host.get_domain(instance)
state = self._get_power_state(dom)
new_domid = dom.ID()
# NOTE(ivoks): By checking domain IDs, we make sure we are
# not recreating domain that's already running.
if old_domid != new_domid:
if state in [power_state.SHUTDOWN,
power_state.CRASHED]:
LOG.info(_LI("Instance shutdown successfully."),
instance=instance)
self._create_domain(domain=dom)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running, instance)
timer.start(interval=0.5).wait()
return True
else:
LOG.info(_LI("Instance may have been rebooted during soft "
"reboot, so return now."), instance=instance)
return True
greenthread.sleep(1)
return False
def _hard_reboot(self, context, instance, network_info,
block_device_info=None):
"""Reboot a virtual machine, given an instance reference.
Performs a Libvirt reset (if supported) on the domain.
If Libvirt reset is unavailable this method actually destroys and
re-creates the domain to ensure the reboot happens, as the guest
OS cannot ignore this action.
"""
self._destroy(instance)
# Convert the system metadata to image metadata
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
# NOTE(stpierre): In certain cases -- e.g., when booting a
# guest to restore its state after restarting
# Nova compute -- the context is not
# populated, which causes this (and
# _create_images_and_backing below) to error.
if not image_meta and context.auth_token is not None:
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(context,
self._image_api,
image_ref,
instance)
instance_dir = libvirt_utils.get_instance_path(instance)
fileutils.ensure_tree(instance_dir)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
# NOTE(vish): This could generate the wrong device_format if we are
# using the raw backend and the images don't exist yet.
# The create_images_and_backing below doesn't properly
# regenerate raw backend images, however, so when it
# does we need to (re)generate the xml after the images
# are in place.
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE (rmk): Re-populate any missing backing files.
disk_info_json = self._get_instance_disk_info(instance.name, xml,
block_device_info)
if context.auth_token is not None:
self._create_images_and_backing(context, instance, instance_dir,
disk_info_json)
# Initialize all the necessary networking, block devices and
# start the instance.
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
reboot=True,
vifs_already_plugged=True)
self._prepare_pci_devices_for_use(
pci_manager.get_instance_pci_devs(instance, 'all'))
def _wait_for_reboot():
"""Called at an interval until the VM is running again."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance rebooted successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_reboot)
timer.start(interval=0.5).wait()
def pause(self, instance):
"""Pause VM instance."""
dom = self._host.get_domain(instance)
dom.suspend()
def unpause(self, instance):
"""Unpause paused VM instance."""
dom = self._host.get_domain(instance)
dom.resume()
def _clean_shutdown(self, instance, timeout, retry_interval):
"""Attempt to shutdown the instance gracefully.
:param instance: The instance to be shutdown
:param timeout: How long to wait in seconds for the instance to
shutdown
:param retry_interval: How often in seconds to signal the instance
to shutdown while waiting
:returns: True if the shutdown succeeded
"""
# List of states that represent a shutdown instance
SHUTDOWN_STATES = [power_state.SHUTDOWN,
power_state.CRASHED]
try:
dom = self._host.get_domain(instance)
except exception.InstanceNotFound:
# If the instance has gone then we don't need to
# wait for it to shutdown
return True
state = self._get_power_state(dom)
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance already shutdown."),
instance=instance)
return True
LOG.debug("Shutting down instance from state %s", state,
instance=instance)
dom.shutdown()
retry_countdown = retry_interval
for sec in six.moves.range(timeout):
dom = self._host.get_domain(instance)
state = self._get_power_state(dom)
if state in SHUTDOWN_STATES:
LOG.info(_LI("Instance shutdown successfully after %d "
"seconds."), sec, instance=instance)
return True
            # Note(PhilD): We can't assume that the Guest was able to process
            #              any previous shutdown signal (for example it may
            #              have still been starting up), so within the overall
            #              timeout we re-trigger the shutdown every
            #              retry_interval.
if retry_countdown == 0:
retry_countdown = retry_interval
# Instance could shutdown at any time, in which case we
# will get an exception when we call shutdown
try:
LOG.debug("Instance in state %s after %d seconds - "
"resending shutdown", state, sec,
instance=instance)
dom.shutdown()
except libvirt.libvirtError:
# Assume this is because its now shutdown, so loop
# one more time to clean up.
LOG.debug("Ignoring libvirt exception from shutdown "
"request.", instance=instance)
continue
else:
retry_countdown -= 1
time.sleep(1)
LOG.info(_LI("Instance failed to shutdown in %d seconds."),
timeout, instance=instance)
return False
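    # Worked example (assumed values) for the retry logic above: with
    # timeout=60 and retry_interval=10 the state is polled once per second
    # and, because the second on which the countdown is reset does not
    # decrement it, the shutdown request is re-sent roughly every 11 seconds
    # (around sec 10, 21, 32, 43 and 54) before the method gives up and
    # returns False.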
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance."""
if timeout:
self._clean_shutdown(instance, timeout, retry_interval)
self._destroy(instance)
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance."""
# We use _hard_reboot here to ensure that all backing files,
# network, and block device connections, etc. are established
# and available before we attempt to start the instance.
self._hard_reboot(context, instance, network_info, block_device_info)
def suspend(self, context, instance):
"""Suspend the specified instance."""
dom = self._host.get_domain(instance)
self._detach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
self._detach_sriov_ports(context, instance, dom)
dom.managedSave(0)
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance."""
image_meta = compute_utils.get_image_metadata(context,
self._image_api, instance.image_ref, instance)
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance, image_meta,
block_device_info=block_device_info)
xml = self._get_existing_domain_xml(instance, network_info,
block_device_info)
dom = self._create_domain_and_network(context, xml, instance,
network_info, disk_info,
block_device_info=block_device_info,
vifs_already_plugged=True)
self._attach_pci_devices(dom,
pci_manager.get_instance_pci_devs(instance))
self._attach_sriov_ports(context, instance, dom, network_info)
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
# Check if the instance is running already and avoid doing
# anything if it is.
try:
domain = self._host.get_domain(instance)
state = self._get_power_state(domain)
ignored_states = (power_state.RUNNING,
power_state.SUSPENDED,
power_state.NOSTATE,
power_state.PAUSED)
if state in ignored_states:
return
except exception.NovaException:
pass
# Instance is not up and could be in an unknown state.
# Be as absolute as possible about getting it back into
# a known and running state.
self._hard_reboot(context, instance, network_info, block_device_info)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
primary images and data needs to be corrected/recovered. Rescuing
        should not edit or override the original image, only allow for
data recovery.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml = self._get_existing_domain_xml(instance, network_info)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
libvirt_utils.write_to_file(unrescue_xml_path, unrescue_xml)
if image_meta is not None:
rescue_image_id = image_meta.get('id')
else:
rescue_image_id = None
rescue_images = {
'image_id': (rescue_image_id or
CONF.libvirt.rescue_image_id or instance.image_ref),
'kernel_id': (CONF.libvirt.rescue_kernel_id or
instance.kernel_id),
'ramdisk_id': (CONF.libvirt.rescue_ramdisk_id or
instance.ramdisk_id),
}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
rescue=True)
self._create_image(context, instance, disk_info['mapping'],
suffix='.rescue', disk_images=rescue_images,
network_info=network_info,
admin_pass=rescue_password)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta, rescue=rescue_images,
write_to_disk=True)
self._destroy(instance)
self._create_domain(xml)
def unrescue(self, instance, network_info):
"""Reboot the VM which is being rescued back into primary images.
"""
instance_dir = libvirt_utils.get_instance_path(instance)
unrescue_xml_path = os.path.join(instance_dir, 'unrescue.xml')
xml = libvirt_utils.load_file(unrescue_xml_path)
virt_dom = self._host.get_domain(instance)
self._destroy(instance)
self._create_domain(xml, virt_dom)
libvirt_utils.file_delete(unrescue_xml_path)
rescue_files = os.path.join(instance_dir, "*.rescue")
for rescue_file in glob.iglob(rescue_files):
libvirt_utils.file_delete(rescue_file)
# cleanup rescue volume
lvm.remove_volumes([lvmdisk for lvmdisk in self._lvm_disks(instance)
if lvmdisk.endswith('.rescue')])
def poll_rebooting_instances(self, timeout, instances):
pass
def _enable_hairpin(self, xml):
interfaces = self._get_interfaces(xml)
for interface in interfaces:
utils.execute('tee',
'/sys/class/net/%s/brport/hairpin_mode' % interface,
process_input='1',
run_as_root=True,
check_exit_code=[0, 1])
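    # Illustrative sketch (hypothetical interface name) of the command the
    # hairpin loop above runs for each guest interface, which lets bridged
    # traffic be reflected back out of the port it arrived on:
    #
    #   echo 1 | tee /sys/class/net/vnet0/brport/hairpin_mode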
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
self._create_image(context, instance,
disk_info['mapping'],
network_info=network_info,
block_device_info=block_device_info,
files=injected_files,
admin_pass=admin_password)
xml = self._get_guest_xml(context, instance, network_info,
disk_info, image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info)
LOG.debug("Instance is running", instance=instance)
def _wait_for_boot():
"""Called at an interval until the VM is running."""
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance spawned successfully."),
instance=instance)
raise loopingcall.LoopingCallDone()
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
timer.start(interval=0.5).wait()
def _flush_libvirt_console(self, pty):
out, err = utils.execute('dd',
'if=%s' % pty,
'iflag=nonblock',
run_as_root=True,
check_exit_code=False)
return out
def _append_to_file(self, data, fpath):
LOG.info(_LI('data: %(data)r, fpath: %(fpath)r'),
{'data': data, 'fpath': fpath})
with open(fpath, 'a+') as fp:
fp.write(data)
return fpath
def get_console_output(self, context, instance):
virt_dom = self._host.get_domain(instance)
xml = virt_dom.XMLDesc(0)
tree = etree.fromstring(xml)
console_types = {}
# NOTE(comstud): We want to try 'file' types first, then try 'pty'
# types. We can't use Python 2.7 syntax of:
# tree.find("./devices/console[@type='file']/source")
# because we need to support 2.6.
console_nodes = tree.findall('./devices/console')
for console_node in console_nodes:
console_type = console_node.get('type')
console_types.setdefault(console_type, [])
console_types[console_type].append(console_node)
        # If the guest has a console logging to a file, prefer to use that
if console_types.get('file'):
for file_console in console_types.get('file'):
source_node = file_console.find('./source')
if source_node is None:
continue
path = source_node.get("path")
if not path:
continue
if not os.path.exists(path):
LOG.info(_LI('Instance is configured with a file console, '
'but the backing file is not (yet?) present'),
instance=instance)
return ""
libvirt_utils.chown(path, os.getuid())
with libvirt_utils.file_open(path, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp,
MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'), remaining,
instance=instance)
return log_data
# Try 'pty' types
if console_types.get('pty'):
for pty_console in console_types.get('pty'):
source_node = pty_console.find('./source')
if source_node is None:
continue
pty = source_node.get("path")
if not pty:
continue
break
else:
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
self._chown_console_log_for_instance(instance)
data = self._flush_libvirt_console(pty)
console_log = self._get_console_log_path(instance)
fpath = self._append_to_file(data, console_log)
with libvirt_utils.file_open(fpath, 'rb') as fp:
log_data, remaining = utils.last_bytes(fp, MAX_CONSOLE_BYTES)
if remaining > 0:
LOG.info(_LI('Truncated console log returned, '
'%d bytes ignored'),
remaining, instance=instance)
return log_data
@staticmethod
def get_host_ip_addr():
ips = compute_utils.get_machine_ips()
if CONF.my_ip not in ips:
LOG.warn(_LW('my_ip address (%(my_ip)s) was not found on '
'any of the interfaces: %(ifaces)s'),
{'my_ip': CONF.my_ip, 'ifaces': ", ".join(ips)})
return CONF.my_ip
def get_vnc_console(self, context, instance):
def get_vnc_port_for_instance(instance_name):
virt_dom = self._host.get_domain(instance)
xml = virt_dom.XMLDesc(0)
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='vnc']")
if graphic is not None:
return graphic.get('port')
# NOTE(rmk): We had VNC consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='vnc')
port = get_vnc_port_for_instance(instance.name)
host = CONF.vncserver_proxyclient_address
return ctype.ConsoleVNC(host=host, port=port)
def get_spice_console(self, context, instance):
def get_spice_ports_for_instance(instance_name):
virt_dom = self._host.get_domain(instance)
xml = virt_dom.XMLDesc(0)
xml_dom = etree.fromstring(xml)
graphic = xml_dom.find("./devices/graphics[@type='spice']")
if graphic is not None:
return (graphic.get('port'), graphic.get('tlsPort'))
# NOTE(rmk): We had Spice consoles enabled but the instance in
# question is not actually listening for connections.
raise exception.ConsoleTypeUnavailable(console_type='spice')
ports = get_spice_ports_for_instance(instance.name)
host = CONF.spice.server_proxyclient_address
return ctype.ConsoleSpice(host=host, port=ports[0], tlsPort=ports[1])
def get_serial_console(self, context, instance):
for hostname, port in self._get_serial_ports_from_instance(
instance, mode='bind'):
return ctype.ConsoleSerial(host=hostname, port=port)
raise exception.ConsoleTypeUnavailable(console_type='serial')
@staticmethod
def _supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
try:
f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            # Check if a write with 512-byte alignment is allowed
align_size = 512
m = mmap.mmap(-1, align_size)
m.write(r"x" * align_size)
os.write(f, m)
os.close(f)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'"),
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'"), {'path': dirpath, 'ex': e})
finally:
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
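    # Minimal standalone sketch of the probe above (illustrative only, the
    # path is hypothetical): O_DIRECT requires the userspace buffer to be
    # block-aligned, which is why an mmap()ed region is written rather than
    # an ordinary string object.
    #
    #   fd = os.open('/var/lib/nova/instances/.directio.test',
    #                os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
    #   buf = mmap.mmap(-1, 512)          # page-aligned, 512-byte buffer
    #   buf.write(b"x" * 512)
    #   os.write(fd, buf)                 # EINVAL here => no direct I/O
    #   os.close(fd)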
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
"""Create a blank image of specified size."""
libvirt_utils.create_image('raw', target,
'%d%c' % (local_size, unit))
def _create_ephemeral(self, target, ephemeral_size,
fs_label, os_type, is_block_dev=False,
max_size=None, context=None, specified_fs=None):
if not is_block_dev:
self._create_local(target, ephemeral_size)
# Run as root only for block devices.
disk.mkfs(os_type, fs_label, target, run_as_root=is_block_dev,
specified_fs=specified_fs)
@staticmethod
def _create_swap(target, swap_mb, max_size=None, context=None):
"""Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@staticmethod
def _get_console_log_path(instance):
return os.path.join(libvirt_utils.get_instance_path(instance),
'console.log')
@staticmethod
def _get_disk_config_path(instance, suffix=''):
return os.path.join(libvirt_utils.get_instance_path(instance),
'disk.config' + suffix)
def _chown_console_log_for_instance(self, instance):
console_log = self._get_console_log_path(instance)
if os.path.exists(console_log):
libvirt_utils.chown(console_log, os.getuid())
def _chown_disk_config_for_instance(self, instance):
disk_config = self._get_disk_config_path(instance)
if os.path.exists(disk_config):
libvirt_utils.chown(disk_config, os.getuid())
@staticmethod
def _is_booted_from_volume(instance, disk_mapping):
"""Determines whether the VM is booting from volume
Determines whether the disk mapping indicates that the VM
is booting from a volume.
"""
return ((not bool(instance.get('image_ref')))
or 'disk' not in disk_mapping)
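    # Illustrative disk mappings (hypothetical) for the check above: an
    # image-backed guest carries a 'disk' entry while a boot-from-volume
    # guest (or one with an empty image_ref) does not, so the latter
    # returns True:
    #
    #   {'disk': {...}, 'disk.local': {...}, 'root': {...}}   -> False
    #   {'vda': {...}, 'root': {'dev': 'vda', ...}}           -> True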
def _inject_data(self, instance, network_info, admin_pass, files, suffix):
"""Injects data in a disk image
Helper used for injecting data in a disk image file system.
Keyword arguments:
        instance -- a dict that refers to instance specifications
        network_info -- a dict that refers to network specifications
        admin_pass -- a string used to set an admin password
        files -- a list of files that need to be injected
suffix -- a string used as an image name suffix
"""
        # Handles the partition that needs to be used.
target_partition = None
if not instance.kernel_id:
target_partition = CONF.libvirt.inject_partition
if target_partition == 0:
target_partition = None
if CONF.libvirt.virt_type == 'lxc':
target_partition = None
# Handles the key injection.
if CONF.libvirt.inject_key and instance.get('key_data'):
key = str(instance.key_data)
else:
key = None
# Handles the admin password injection.
if not CONF.libvirt.inject_password:
admin_pass = None
# Handles the network injection.
net = netutils.get_injected_network_template(
network_info, libvirt_virt_type=CONF.libvirt.virt_type)
# Handles the metadata injection
metadata = instance.get('metadata')
image_type = CONF.libvirt.images_type
if any((key, net, metadata, admin_pass, files)):
injection_image = self.image_backend.image(
instance,
'disk' + suffix,
image_type)
img_id = instance.image_ref
if not injection_image.check_image_exists():
LOG.warn(_LW('Image %s not found on disk storage. '
'Continue without injecting data'),
injection_image.path, instance=instance)
return
try:
disk.inject_data(injection_image.path,
key, net, metadata, admin_pass, files,
partition=target_partition,
use_cow=CONF.use_cow_images,
mandatory=('files',))
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error injecting data into image '
'%(img_id)s (%(e)s)'),
{'img_id': img_id, 'e': e},
instance=instance)
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True,
fallback_from_host=None):
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
LOG.info(_LI('Creating image'), instance=instance)
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
        # NOTE(yaguang): For evacuate, disk.config already exists in shared
        #                storage, so chown it.
self._chown_disk_config_for_instance(instance)
        # NOTE(vish): No need to add the suffix to console.log
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
if not disk_images:
disk_images = {'image_id': instance.image_ref,
'kernel_id': instance.kernel_id,
'ramdisk_id': instance.ramdisk_id}
if disk_images['kernel_id']:
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance.user_id,
project_id=instance.project_id)
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance.user_id,
project_id=instance.project_id)
inst_type = instance.get_flavor()
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
size = instance.root_gb * units.Gi
if size == 0 or suffix == '.rescue':
size = None
backend = image('disk')
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
fetch_func = libvirt_utils.fetch_image
self._try_fetch_image_cache(backend, fetch_func, context,
root_fname, disk_images['image_id'],
instance, size, fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(instance.os_type)
# Generate a file extension based on the file system
# type and the mkfs commands configured if any
file_extension = disk.get_file_extension_for_os_type(
os_type_with_default)
ephemeral_gb = instance.ephemeral_gb
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
specified_fs = eph.get('guest_format')
if specified_fs and not self.is_supported_fs_format(specified_fs):
msg = _("%s format is not supported") % specified_fs
raise exception.InvalidBDMFormat(details=msg)
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], file_extension)
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=eph['size'],
specified_fs=specified_fs)
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
context=context,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
# Config drive
if configdrive.required_by(instance):
LOG.info(_LI('Using config drive'), instance=instance)
extra_md = {}
if admin_pass:
extra_md['admin_pass'] = admin_pass
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
configdrive_path = self._get_disk_config_path(instance, suffix)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection only if needed
elif inject_files and CONF.libvirt.inject_partition != -2:
if booted_from_volume:
LOG.warn(_LW('File injection into a boot from volume '
'instance is not supported'), instance=instance)
self._inject_data(
instance, network_info, admin_pass, files, suffix)
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
def _prepare_pci_devices_for_use(self, pci_devices):
        # kvm, qemu support managed mode
# In managed mode, the configured device will be automatically
# detached from the host OS drivers when the guest is started,
# and then re-attached when the guest shuts down.
if CONF.libvirt.virt_type != 'xen':
# we do manual detach only for xen
return
try:
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
# Note(yjiang5) Spelling for 'dettach' is correct, see
# http://libvirt.org/html/libvirt-libvirt.html.
libvirt_dev.dettach()
# Note(yjiang5): A reset of one PCI device may impact other
            # devices on the same bus, thus we need two separate loops
# to detach and then reset it.
for dev in pci_devices:
libvirt_dev_addr = dev['hypervisor_name']
libvirt_dev = \
self._conn.nodeDeviceLookupByName(libvirt_dev_addr)
libvirt_dev.reset()
except libvirt.libvirtError as exc:
raise exception.PciDevicePrepareFailed(id=dev['id'],
instance_uuid=
dev['instance_uuid'],
reason=six.text_type(exc))
def _detach_pci_devices(self, dom, pci_devs):
        # For libvirt versions < 1.1.1 this is a race condition, so forbid
        # detaching when the host doesn't have at least that version.
if not self._host.has_min_version(MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
if pci_devs:
reason = (_("Detaching PCI devices with libvirt < %(ver)s"
" is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=pci_devs)
try:
for dev in pci_devs:
dom.detachDeviceFlags(self._get_guest_pci_device(dev).to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
# after detachDeviceFlags returned, we should check the dom to
# ensure the detaching is finished
xml = dom.XMLDesc(0)
xml_doc = etree.fromstring(xml)
guest_config = vconfig.LibvirtConfigGuest()
guest_config.parse_dom(xml_doc)
for hdev in [d for d in guest_config.devices
if isinstance(d, vconfig.LibvirtConfigGuestHostdevPCI)]:
hdbsf = [hdev.domain, hdev.bus, hdev.slot, hdev.function]
dbsf = pci_utils.parse_address(dev['address'])
if [int(x, 16) for x in hdbsf] ==\
[int(x, 16) for x in dbsf]:
raise exception.PciDeviceDetachFailed(reason=
"timeout",
dev=dev)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
LOG.warn(_LW("Instance disappeared while detaching "
"a PCI device from it."))
else:
raise
def _attach_pci_devices(self, dom, pci_devs):
try:
for dev in pci_devs:
dom.attachDevice(self._get_guest_pci_device(dev).to_xml())
except libvirt.libvirtError:
LOG.error(_LE('Attaching PCI devices %(dev)s to %(dom)s failed.'),
{'dev': pci_devs, 'dom': dom.ID()})
raise
def _prepare_args_for_get_config(self, context, instance):
image_ref = instance.image_ref
image_meta = compute_utils.get_image_metadata(
context, self._image_api, image_ref, instance)
return instance.flavor, image_meta
@staticmethod
def _has_sriov_port(network_info):
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
return True
return False
def _attach_sriov_ports(self, context, instance, dom, network_info=None):
if network_info is None:
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_sriov_port(network_info):
flavor, image_meta = self._prepare_args_for_get_config(context,
instance)
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
flavor,
CONF.libvirt.virt_type)
LOG.debug('Attaching SR-IOV port %(port)s to %(dom)s',
{'port': vif, 'dom': dom.ID()})
dom.attachDevice(cfg.to_xml())
def _detach_sriov_ports(self, context, instance, dom):
network_info = instance.info_cache.network_info
if network_info is None:
return
if self._has_sriov_port(network_info):
            # For libvirt versions < 1.1.1 this is a race condition, so
            # forbid detaching when running an older version.
if not self._host.has_min_version(
MIN_LIBVIRT_DEVICE_CALLBACK_VERSION):
reason = (_("Detaching SR-IOV ports with"
" libvirt < %(ver)s is not permitted") %
{'ver': MIN_LIBVIRT_DEVICE_CALLBACK_VERSION})
raise exception.PciDeviceDetachFailed(reason=reason,
dev=network_info)
flavor, image_meta = self._prepare_args_for_get_config(context,
instance)
for vif in network_info:
if vif['vnic_type'] == network_model.VNIC_TYPE_DIRECT:
cfg = self.vif_driver.get_config(instance,
vif,
image_meta,
flavor,
CONF.libvirt.virt_type)
dom.detachDeviceFlags(cfg.to_xml(),
libvirt.VIR_DOMAIN_AFFECT_LIVE)
def _set_host_enabled(self, enabled,
disable_reason=DISABLE_REASON_UNDEFINED):
"""Enables / Disables the compute service on this host.
This doesn't override non-automatic disablement with an automatic
        setting, thereby permitting operators to keep otherwise
healthy hosts out of rotation.
"""
status_name = {True: 'disabled',
False: 'enabled'}
disable_service = not enabled
ctx = nova_context.get_admin_context()
try:
service = objects.Service.get_by_compute_host(ctx, CONF.host)
if service.disabled != disable_service:
# Note(jang): this is a quick fix to stop operator-
# disabled compute hosts from re-enabling themselves
# automatically. We prefix any automatic reason code
# with a fixed string. We only re-enable a host
# automatically if we find that string in place.
# This should probably be replaced with a separate flag.
if not service.disabled or (
service.disabled_reason and
service.disabled_reason.startswith(DISABLE_PREFIX)):
service.disabled = disable_service
service.disabled_reason = (
DISABLE_PREFIX + disable_reason
if disable_service else DISABLE_REASON_UNDEFINED)
service.save()
LOG.debug('Updating compute service status to %s',
status_name[disable_service])
else:
LOG.debug('Not overriding manual compute service '
'status with: %s',
status_name[disable_service])
except exception.ComputeHostNotFound:
LOG.warn(_LW('Cannot update service status on host "%s" '
'since it is not registered.'), CONF.host)
except Exception:
LOG.warn(_LW('Cannot update service status on host "%s" '
'due to an unexpected exception.'), CONF.host,
exc_info=True)
def _get_guest_cpu_model_config(self):
mode = CONF.libvirt.cpu_mode
model = CONF.libvirt.cpu_model
if (CONF.libvirt.virt_type == "kvm" or
CONF.libvirt.virt_type == "qemu"):
if mode is None:
mode = "host-model"
if mode == "none":
return vconfig.LibvirtConfigGuestCPU()
else:
if mode is None or mode == "none":
return None
if ((CONF.libvirt.virt_type != "kvm" and
CONF.libvirt.virt_type != "qemu")):
msg = _("Config requested an explicit CPU model, but "
"the current libvirt hypervisor '%s' does not "
"support selecting CPU models") % CONF.libvirt.virt_type
raise exception.Invalid(msg)
if mode == "custom" and model is None:
msg = _("Config requested a custom CPU model, but no "
"model name was provided")
raise exception.Invalid(msg)
elif mode != "custom" and model is not None:
msg = _("A CPU model name should not be set when a "
"host CPU model is requested")
raise exception.Invalid(msg)
LOG.debug("CPU mode '%(mode)s' model '%(model)s' was chosen",
{'mode': mode, 'model': (model or "")})
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.mode = mode
cpu.model = model
return cpu
def _get_guest_cpu_config(self, flavor, image,
guest_cpu_numa_config, instance_numa_topology):
cpu = self._get_guest_cpu_model_config()
if cpu is None:
return None
topology = hardware.get_best_cpu_topology(
flavor, image, numa_topology=instance_numa_topology)
cpu.sockets = topology.sockets
cpu.cores = topology.cores
cpu.threads = topology.threads
cpu.numa = guest_cpu_numa_config
return cpu
def _get_guest_disk_config(self, instance, name, disk_mapping, inst_type,
image_type=None):
if CONF.libvirt.hw_disk_discard:
if not self._host.has_min_version(MIN_LIBVIRT_DISCARD_VERSION,
MIN_QEMU_DISCARD_VERSION,
REQ_HYPERVISOR_DISCARD):
msg = (_('Volume sets discard option, but libvirt %(libvirt)s'
' or later is required, qemu %(qemu)s'
' or later is required.') %
{'libvirt': MIN_LIBVIRT_DISCARD_VERSION,
'qemu': MIN_QEMU_DISCARD_VERSION})
raise exception.Invalid(msg)
image = self.image_backend.image(instance,
name,
image_type)
disk_info = disk_mapping[name]
return image.libvirt_info(disk_info['bus'],
disk_info['dev'],
disk_info['type'],
self.disk_cachemode,
inst_type['extra_specs'],
self._host.get_version())
def _get_guest_fs_config(self, instance, name, image_type=None):
image = self.image_backend.image(instance,
name,
image_type)
return image.libvirt_fs_info("/", "ploop")
def _get_guest_storage_config(self, instance, image_meta,
disk_info,
rescue, block_device_info,
inst_type, os_type):
devices = []
disk_mapping = disk_info['mapping']
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
mount_rootfs = CONF.libvirt.virt_type == "lxc"
if mount_rootfs:
fs = vconfig.LibvirtConfigGuestFilesys()
fs.source_type = "mount"
fs.source_dir = os.path.join(
libvirt_utils.get_instance_path(instance), 'rootfs')
devices.append(fs)
elif os_type == vm_mode.EXE and CONF.libvirt.virt_type == "parallels":
fs = self._get_guest_fs_config(instance, "disk")
devices.append(fs)
else:
if rescue:
diskrescue = self._get_guest_disk_config(instance,
'disk.rescue',
disk_mapping,
inst_type)
devices.append(diskrescue)
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
else:
if 'disk' in disk_mapping:
diskos = self._get_guest_disk_config(instance,
'disk',
disk_mapping,
inst_type)
devices.append(diskos)
if 'disk.local' in disk_mapping:
disklocal = self._get_guest_disk_config(instance,
'disk.local',
disk_mapping,
inst_type)
devices.append(disklocal)
instance.default_ephemeral_device = (
block_device.prepend_dev(disklocal.target_dev))
for idx, eph in enumerate(
driver.block_device_info_get_ephemerals(
block_device_info)):
diskeph = self._get_guest_disk_config(
instance,
blockinfo.get_eph_disk(idx),
disk_mapping, inst_type)
devices.append(diskeph)
if 'disk.swap' in disk_mapping:
diskswap = self._get_guest_disk_config(instance,
'disk.swap',
disk_mapping,
inst_type)
devices.append(diskswap)
instance.default_swap_device = (
block_device.prepend_dev(diskswap.target_dev))
if 'disk.config' in disk_mapping:
diskconfig = self._get_guest_disk_config(instance,
'disk.config',
disk_mapping,
inst_type,
'raw')
devices.append(diskconfig)
for vol in block_device.get_bdms_to_connect(block_device_mapping,
mount_rootfs):
connection_info = vol['connection_info']
vol_dev = block_device.prepend_dev(vol['mount_device'])
info = disk_mapping[vol_dev]
self._connect_volume(connection_info, info)
cfg = self._get_volume_config(connection_info, info)
devices.append(cfg)
vol['connection_info'] = connection_info
vol.save()
for d in devices:
self._set_cache_mode(d)
if (image_meta and
image_meta.get('properties', {}).get('hw_scsi_model')):
hw_scsi_model = image_meta['properties']['hw_scsi_model']
scsi_controller = vconfig.LibvirtConfigGuestController()
scsi_controller.type = 'scsi'
scsi_controller.model = hw_scsi_model
devices.append(scsi_controller)
return devices
def _get_host_sysinfo_serial_hardware(self):
"""Get a UUID from the host hardware
Get a UUID for the host hardware reported by libvirt.
This is typically from the SMBIOS data, unless it has
been overridden in /etc/libvirt/libvirtd.conf
"""
caps = self._host.get_capabilities()
return caps.host.uuid
def _get_host_sysinfo_serial_os(self):
"""Get a UUID from the host operating system
Get a UUID for the host operating system. Modern Linux
distros based on systemd provide a /etc/machine-id
file containing a UUID. This is also provided inside
systemd based containers and can be provided by other
init systems too, since it is just a plain text file.
"""
with open("/etc/machine-id") as f:
# We want to have '-' in the right place
# so we parse & reformat the value
return str(uuid.UUID(f.read().split()[0]))
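    # Worked example (hypothetical machine-id) of the reformatting above:
    #
    #   >>> str(uuid.UUID('2d1a41a3ab1b4ed2a2ecf6b2bba48e2a'))
    #   '2d1a41a3-ab1b-4ed2-a2ec-f6b2bba48e2a'
    #
    # i.e. the bare 32-character /etc/machine-id value gains the dashes
    # expected of a UUID before being used as the SMBIOS system serial.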
def _get_host_sysinfo_serial_auto(self):
if os.path.exists("/etc/machine-id"):
return self._get_host_sysinfo_serial_os()
else:
return self._get_host_sysinfo_serial_hardware()
def _get_guest_config_sysinfo(self, instance):
sysinfo = vconfig.LibvirtConfigGuestSysinfo()
sysinfo.system_manufacturer = version.vendor_string()
sysinfo.system_product = version.product_string()
sysinfo.system_version = version.version_string_with_package()
sysinfo.system_serial = self._sysinfo_serial_func()
sysinfo.system_uuid = instance.uuid
return sysinfo
def _get_guest_pci_device(self, pci_device):
dbsf = pci_utils.parse_address(pci_device['address'])
dev = vconfig.LibvirtConfigGuestHostdevPCI()
dev.domain, dev.bus, dev.slot, dev.function = dbsf
        # only kvm and qemu support managed mode
if CONF.libvirt.virt_type in ('xen', 'parallels',):
dev.managed = 'no'
if CONF.libvirt.virt_type in ('kvm', 'qemu'):
dev.managed = 'yes'
return dev
def _get_guest_config_meta(self, context, instance):
"""Get metadata config for guest."""
meta = vconfig.LibvirtConfigGuestMetaNovaInstance()
meta.package = version.version_string_with_package()
meta.name = instance.display_name
meta.creationTime = time.time()
if instance.image_ref not in ("", None):
meta.roottype = "image"
meta.rootid = instance.image_ref
if context is not None:
ometa = vconfig.LibvirtConfigGuestMetaNovaOwner()
ometa.userid = context.user_id
ometa.username = context.user_name
ometa.projectid = context.project_id
ometa.projectname = context.project_name
meta.owner = ometa
fmeta = vconfig.LibvirtConfigGuestMetaNovaFlavor()
flavor = instance.flavor
fmeta.name = flavor.name
fmeta.memory = flavor.memory_mb
fmeta.vcpus = flavor.vcpus
fmeta.ephemeral = flavor.ephemeral_gb
fmeta.disk = flavor.root_gb
fmeta.swap = flavor.swap
meta.flavor = fmeta
return meta
def _machine_type_mappings(self):
mappings = {}
for mapping in CONF.libvirt.hw_machine_type:
host_arch, _, machine_type = mapping.partition('=')
mappings[host_arch] = machine_type
return mappings
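    # Illustrative config (values are assumptions, not defaults) for the
    # mapping parser above:
    #
    #   [libvirt]
    #   hw_machine_type = x86_64=pc-i440fx-2.1,armv7l=vexpress-a15
    #
    # yields {'x86_64': 'pc-i440fx-2.1', 'armv7l': 'vexpress-a15'}, and
    # _get_machine_type() then picks the entry matching the host arch
    # unless the image sets hw_machine_type itself.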
def _get_machine_type(self, image_meta, caps):
# The underlying machine type can be set as an image attribute,
# or otherwise based on some architecture specific defaults
mach_type = None
if (image_meta is not None and image_meta.get('properties') and
image_meta['properties'].get('hw_machine_type')
is not None):
mach_type = image_meta['properties']['hw_machine_type']
else:
# For ARM systems we will default to vexpress-a15 for armv7
# and virt for aarch64
if caps.host.cpu.arch == arch.ARMV7:
mach_type = "vexpress-a15"
if caps.host.cpu.arch == arch.AARCH64:
mach_type = "virt"
if caps.host.cpu.arch in (arch.S390, arch.S390X):
mach_type = 's390-ccw-virtio'
# If set in the config, use that as the default.
if CONF.libvirt.hw_machine_type:
mappings = self._machine_type_mappings()
mach_type = mappings.get(caps.host.cpu.arch)
return mach_type
@staticmethod
def _create_idmaps(klass, map_strings):
idmaps = []
if len(map_strings) > 5:
map_strings = map_strings[0:5]
LOG.warn(_LW("Too many id maps, only included first five."))
for map_string in map_strings:
try:
idmap = klass()
values = [int(i) for i in map_string.split(":")]
idmap.start = values[0]
idmap.target = values[1]
idmap.count = values[2]
idmaps.append(idmap)
except (ValueError, IndexError):
LOG.warn(_LW("Invalid value for id mapping %s"), map_string)
return idmaps
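    # Worked example (assumed values) for the id-map parser above: the
    # string "0:1000:100" becomes an idmap with start=0, target=1000 and
    # count=100, i.e. guest ids 0-99 are mapped onto host ids 1000-1099;
    # anything past the first five map strings is dropped with a warning.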
def _get_guest_idmaps(self):
id_maps = []
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.uid_maps:
uid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestUIDMap,
CONF.libvirt.uid_maps)
id_maps.extend(uid_maps)
if CONF.libvirt.virt_type == 'lxc' and CONF.libvirt.gid_maps:
gid_maps = self._create_idmaps(vconfig.LibvirtConfigGuestGIDMap,
CONF.libvirt.gid_maps)
id_maps.extend(gid_maps)
return id_maps
def _update_guest_cputune(self, guest, flavor, virt_type):
if virt_type in ('lxc', 'kvm', 'qemu'):
if guest.cputune is None:
guest.cputune = vconfig.LibvirtConfigGuestCPUTune()
# Setting the default cpu.shares value to be a value
# dependent on the number of vcpus
guest.cputune.shares = 1024 * guest.vcpus
cputuning = ['shares', 'period', 'quota']
for name in cputuning:
key = "quota:cpu_" + name
if key in flavor.extra_specs:
setattr(guest.cputune, name,
int(flavor.extra_specs[key]))
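    # Illustrative flavor (hypothetical extra specs) for the tuning above:
    # a 4-vCPU flavor defaults to cputune.shares = 1024 * 4 = 4096, while
    # extra specs such as
    #
    #   {'quota:cpu_shares': '2048', 'quota:cpu_period': '100000',
    #    'quota:cpu_quota': '50000'}
    #
    # override or add the corresponding <cputune> elements (here capping
    # the guest at half of each 100 ms scheduling period).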
def _get_cpu_numa_config_from_instance(self, instance_numa_topology):
if instance_numa_topology:
guest_cpu_numa = vconfig.LibvirtConfigGuestCPUNUMA()
for instance_cell in instance_numa_topology.cells:
guest_cell = vconfig.LibvirtConfigGuestCPUNUMACell()
guest_cell.id = instance_cell.id
guest_cell.cpus = instance_cell.cpuset
guest_cell.memory = instance_cell.memory * units.Ki
guest_cpu_numa.cells.append(guest_cell)
return guest_cpu_numa
def _has_cpu_policy_support(self):
for ver in BAD_LIBVIRT_CPU_POLICY_VERSIONS:
if self._host.has_version(ver):
                ver_ = self._version_to_string(ver)
raise exception.CPUPinningNotSupported(reason=_(
'Invalid libvirt version %(version)s') % {'version': ver_})
return True
def _get_guest_numa_config(self, instance_numa_topology, flavor, pci_devs,
allowed_cpus=None):
"""Returns the config objects for the guest NUMA specs.
Determines the CPUs that the guest can be pinned to if the guest
specifies a cell topology and the host supports it. Constructs the
libvirt XML config object representing the NUMA topology selected
for the guest. Returns a tuple of:
(cpu_set, guest_cpu_tune, guest_cpu_numa, guest_numa_tune)
With the following caveats:
a) If there is no specified guest NUMA topology, then
all tuple elements except cpu_set shall be None. cpu_set
will be populated with the chosen CPUs that the guest
allowed CPUs fit within, which could be the supplied
allowed_cpus value if the host doesn't support NUMA
topologies.
b) If there is a specified guest NUMA topology, then
cpu_set will be None and guest_cpu_numa will be the
LibvirtConfigGuestCPUNUMA object representing the guest's
NUMA topology. If the host supports NUMA, then guest_cpu_tune
will contain a LibvirtConfigGuestCPUTune object representing
the optimized chosen cells that match the host capabilities
with the instance's requested topology. If the host does
not support NUMA, then guest_cpu_tune and guest_numa_tune
will be None.
"""
if (not self._has_numa_support() and
instance_numa_topology is not None):
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.NUMATopologyUnsupported()
topology = self._get_host_numa_topology()
# We have instance NUMA so translate it to the config class
guest_cpu_numa_config = self._get_cpu_numa_config_from_instance(
instance_numa_topology)
if not guest_cpu_numa_config:
# No NUMA topology defined for instance
vcpus = flavor.vcpus
memory = flavor.memory_mb
if topology:
# Host is NUMA capable so try to keep the instance in a cell
pci_cells = {pci.numa_node for pci in pci_devs}
if len(pci_cells) == 0:
viable_cells_cpus = []
for cell in topology.cells:
if vcpus <= len(cell.cpuset) and memory <= cell.memory:
viable_cells_cpus.append(cell.cpuset)
if viable_cells_cpus:
pin_cpuset = random.choice(viable_cells_cpus)
return GuestNumaConfig(pin_cpuset, None, None, None)
elif len(pci_cells) == 1 and None not in pci_cells:
cell = topology.cells[pci_cells.pop()]
if vcpus <= len(cell.cpuset) and memory <= cell.memory:
return GuestNumaConfig(cell.cpuset, None, None, None)
# We have no NUMA topology in the host either,
            # or we can't find a single cell to accommodate the instance
# TODO(ndipanov): Attempt to spread the instance
# across NUMA nodes and expose the topology to the
# instance as an optimisation
return GuestNumaConfig(allowed_cpus, None, None, None)
else:
if topology:
# Now get the CpuTune configuration from the numa_topology
guest_cpu_tune = vconfig.LibvirtConfigGuestCPUTune()
guest_numa_tune = vconfig.LibvirtConfigGuestNUMATune()
allpcpus = []
numa_mem = vconfig.LibvirtConfigGuestNUMATuneMemory()
numa_memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()
for _ in guest_cpu_numa_config.cells]
for host_cell in topology.cells:
for guest_node_id, guest_config_cell in enumerate(
guest_cpu_numa_config.cells):
if guest_config_cell.id == host_cell.id:
node = numa_memnodes[guest_node_id]
node.cellid = guest_config_cell.id
node.nodeset = [host_cell.id]
node.mode = "strict"
numa_mem.nodeset.append(host_cell.id)
object_numa_cell = (
instance_numa_topology.cells[guest_node_id]
)
for cpu in guest_config_cell.cpus:
pin_cpuset = (
vconfig.LibvirtConfigGuestCPUTuneVCPUPin())
pin_cpuset.id = cpu
# If there is pinning information in the cell
# we pin to individual CPUs, otherwise we float
# over the whole host NUMA node
if (object_numa_cell.cpu_pinning and
self._has_cpu_policy_support()):
pcpu = object_numa_cell.cpu_pinning[cpu]
pin_cpuset.cpuset = set([pcpu])
else:
pin_cpuset.cpuset = host_cell.cpuset
allpcpus.extend(pin_cpuset.cpuset)
guest_cpu_tune.vcpupin.append(pin_cpuset)
# TODO(berrange) When the guest has >1 NUMA node, it will
# span multiple host NUMA nodes. By pinning emulator threads
# to the union of all nodes, we guarantee there will be
# cross-node memory access by the emulator threads when
# responding to guest I/O operations. The only way to avoid
# this would be to pin emulator threads to a single node and
# tell the guest OS to only do I/O from one of its virtual
# NUMA nodes. This is not even remotely practical.
#
# The long term solution is to make use of a new QEMU feature
# called "I/O Threads" which will let us configure an explicit
# I/O thread for each guest vCPU or guest NUMA node. It is
# still TBD how to make use of this feature though, especially
                # how to associate IO threads with guest devices to eliminate
# cross NUMA node traffic. This is an area of investigation
# for QEMU community devs.
emulatorpin = vconfig.LibvirtConfigGuestCPUTuneEmulatorPin()
emulatorpin.cpuset = set(allpcpus)
guest_cpu_tune.emulatorpin = emulatorpin
# Sort the vcpupin list per vCPU id for human-friendlier XML
guest_cpu_tune.vcpupin.sort(key=operator.attrgetter("id"))
guest_numa_tune.memory = numa_mem
guest_numa_tune.memnodes = numa_memnodes
# normalize cell.id
for i, (cell, memnode) in enumerate(
zip(guest_cpu_numa_config.cells,
guest_numa_tune.memnodes)):
cell.id = i
memnode.cellid = i
return GuestNumaConfig(None, guest_cpu_tune,
guest_cpu_numa_config,
guest_numa_tune)
else:
return GuestNumaConfig(allowed_cpus, None,
guest_cpu_numa_config, None)
def _get_guest_os_type(self, virt_type):
"""Returns the guest OS type based on virt type."""
if virt_type == "lxc":
ret = vm_mode.EXE
elif virt_type == "uml":
ret = vm_mode.UML
elif virt_type == "xen":
ret = vm_mode.XEN
else:
ret = vm_mode.HVM
return ret
def _set_guest_for_rescue(self, rescue, guest, inst_path, virt_type,
root_device_name):
if rescue.get('kernel_id'):
guest.os_kernel = os.path.join(inst_path, "kernel.rescue")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if rescue.get('ramdisk_id'):
guest.os_initrd = os.path.join(inst_path, "ramdisk.rescue")
def _set_guest_for_inst_kernel(self, instance, guest, inst_path, virt_type,
root_device_name, image_meta):
guest.os_kernel = os.path.join(inst_path, "kernel")
if virt_type == "xen":
guest.os_cmdline = "ro root=%s" % root_device_name
else:
guest.os_cmdline = ("root=%s %s" % (root_device_name, CONSOLE))
if virt_type == "qemu":
guest.os_cmdline += " no_timer_check"
if instance.ramdisk_id:
guest.os_initrd = os.path.join(inst_path, "ramdisk")
# we only support os_command_line with images with an explicit
# kernel set and don't want to break nova if there's an
# os_command_line property without a specified kernel_id param
if image_meta:
img_props = image_meta.get('properties', {})
if img_props.get('os_command_line'):
guest.os_cmdline = img_props.get('os_command_line')
def _set_clock(self, guest, os_type, image_meta, virt_type):
# NOTE(mikal): Microsoft Windows expects the clock to be in
# "localtime". If the clock is set to UTC, then you can use a
# registry key to let windows know, but Microsoft says this is
# buggy in http://support.microsoft.com/kb/2687252
clk = vconfig.LibvirtConfigGuestClock()
if os_type == 'windows':
LOG.info(_LI('Configuring timezone for windows instance to '
'localtime'))
clk.offset = 'localtime'
else:
clk.offset = 'utc'
guest.set_clock(clk)
if virt_type == "kvm":
self._set_kvm_timers(clk, os_type, image_meta)
def _set_kvm_timers(self, clk, os_type, image_meta):
# TODO(berrange) One day this should be per-guest
# OS type configurable
tmpit = vconfig.LibvirtConfigGuestTimer()
tmpit.name = "pit"
tmpit.tickpolicy = "delay"
tmrtc = vconfig.LibvirtConfigGuestTimer()
tmrtc.name = "rtc"
tmrtc.tickpolicy = "catchup"
clk.add_timer(tmpit)
clk.add_timer(tmrtc)
guestarch = libvirt_utils.get_arch(image_meta)
if guestarch in (arch.I686, arch.X86_64):
# NOTE(rfolco): HPET is a hardware timer for x86 arch.
# qemu -no-hpet is not supported on non-x86 targets.
tmhpet = vconfig.LibvirtConfigGuestTimer()
tmhpet.name = "hpet"
tmhpet.present = False
clk.add_timer(tmhpet)
# With new enough QEMU we can provide Windows guests
# with the paravirtualized hyperv timer source. This
# is the windows equiv of kvm-clock, allowing Windows
# guests to accurately keep time.
if (os_type == 'windows' and
self._host.has_min_version(MIN_LIBVIRT_HYPERV_TIMER_VERSION,
MIN_QEMU_HYPERV_TIMER_VERSION)):
tmhyperv = vconfig.LibvirtConfigGuestTimer()
tmhyperv.name = "hypervclock"
tmhyperv.present = True
clk.add_timer(tmhyperv)
def _set_features(self, guest, os_type, caps, virt_type):
if virt_type == "xen":
# PAE only makes sense in X86
if caps.host.cpu.arch in (arch.I686, arch.X86_64):
guest.features.append(vconfig.LibvirtConfigGuestFeaturePAE())
if (virt_type not in ("lxc", "uml", "parallels", "xen") or
(virt_type == "xen" and guest.os_type == vm_mode.HVM)):
guest.features.append(vconfig.LibvirtConfigGuestFeatureACPI())
guest.features.append(vconfig.LibvirtConfigGuestFeatureAPIC())
if (virt_type in ("qemu", "kvm") and
os_type == 'windows' and
self._host.has_min_version(MIN_LIBVIRT_HYPERV_FEATURE_VERSION,
MIN_QEMU_HYPERV_FEATURE_VERSION)):
hv = vconfig.LibvirtConfigGuestFeatureHyperV()
hv.relaxed = True
if self._host.has_min_version(
MIN_LIBVIRT_HYPERV_FEATURE_EXTRA_VERSION):
hv.spinlocks = True
# Increase spinlock retries - value recommended by
# KVM maintainers who certify Windows guests
# with Microsoft
hv.spinlock_retries = 8191
hv.vapic = True
guest.features.append(hv)
def _create_serial_console_devices(self, guest, instance, flavor,
image_meta):
guest_arch = libvirt_utils.get_arch(image_meta)
if CONF.serial_console.enabled:
num_ports = hardware.get_number_of_serial_ports(
flavor, image_meta)
for port in six.moves.range(num_ports):
if guest_arch in (arch.S390, arch.S390X):
console = vconfig.LibvirtConfigGuestConsole()
else:
console = vconfig.LibvirtConfigGuestSerial()
console.port = port
console.type = "tcp"
console.listen_host = (
CONF.serial_console.proxyclient_address)
console.listen_port = (
serial_console.acquire_port(
console.listen_host))
guest.add_device(console)
else:
# The QEMU 'pty' driver throws away any data if no
# client app is connected. Thus we can't get away
# with a single type=pty console. Instead we have
# to configure two separate consoles.
if guest_arch in (arch.S390, arch.S390X):
consolelog = vconfig.LibvirtConfigGuestConsole()
consolelog.target_type = "sclplm"
else:
consolelog = vconfig.LibvirtConfigGuestSerial()
consolelog.type = "file"
consolelog.source_path = self._get_console_log_path(instance)
guest.add_device(consolelog)
def _add_video_driver(self, guest, image_meta, img_meta_prop, flavor):
VALID_VIDEO_DEVICES = ("vga", "cirrus", "vmvga", "xen", "qxl")
video = vconfig.LibvirtConfigGuestVideo()
# NOTE(ldbragst): The following logic sets the video.type
# depending on supported defaults given the architecture,
# virtualization type, and features. The video.type attribute can
# be overridden by the user with image_meta['properties'], which
# is carried out in the next if statement below this one.
guestarch = libvirt_utils.get_arch(image_meta)
if guest.os_type == vm_mode.XEN:
video.type = 'xen'
elif CONF.libvirt.virt_type == 'parallels':
video.type = 'vga'
elif guestarch in (arch.PPC, arch.PPC64):
# NOTE(ldbragst): PowerKVM doesn't support 'cirrus' by default
# so use 'vga' instead when running on Power hardware.
video.type = 'vga'
elif CONF.spice.enabled:
video.type = 'qxl'
if img_meta_prop.get('hw_video_model'):
video.type = img_meta_prop.get('hw_video_model')
if (video.type not in VALID_VIDEO_DEVICES):
raise exception.InvalidVideoMode(model=video.type)
# Set video memory, only if the flavor's limit is set
video_ram = int(img_meta_prop.get('hw_video_ram', 0))
max_vram = int(flavor.extra_specs.get('hw_video:ram_max_mb', 0))
if video_ram > max_vram:
raise exception.RequestedVRamTooHigh(req_vram=video_ram,
max_vram=max_vram)
if max_vram and video_ram:
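# libvirt expresses video RAM in KiB, so the MB value from
# hw_video_ram is multiplied by 1024 (units.Mi / units.Ki);
# e.g. hw_video_ram=16 yields vram=16384.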
video.vram = video_ram * units.Mi / units.Ki
guest.add_device(video)
def _add_qga_device(self, guest, instance):
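# The guest agent is exposed as a channel device backed by a UNIX
# socket on the host, named after the agent target and the instance,
# so the QEMU guest agent inside the guest can reach the host.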
qga = vconfig.LibvirtConfigGuestChannel()
qga.type = "unix"
qga.target_name = "org.qemu.guest_agent.0"
qga.source_path = ("/var/lib/libvirt/qemu/%s.%s.sock" %
("org.qemu.guest_agent.0", instance.name))
guest.add_device(qga)
def _add_rng_device(self, guest, flavor):
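# Add a virtio RNG device. Rate limiting is applied only when
# hw_rng:rate_bytes is set, and the device is backed by
# CONF.libvirt.rng_dev_path when configured (a missing path raises
# RngDeviceNotExist).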
rng_device = vconfig.LibvirtConfigGuestRng()
rate_bytes = flavor.extra_specs.get('hw_rng:rate_bytes', 0)
period = flavor.extra_specs.get('hw_rng:rate_period', 0)
if rate_bytes:
rng_device.rate_bytes = int(rate_bytes)
rng_device.rate_period = int(period)
rng_path = CONF.libvirt.rng_dev_path
if (rng_path and not os.path.exists(rng_path)):
raise exception.RngDeviceNotExist(path=rng_path)
rng_device.backend = rng_path
guest.add_device(rng_device)
def _set_qemu_guest_agent(self, guest, flavor, instance, img_meta_prop):
qga_enabled = False
# Enable qga only if 'hw_qemu_guest_agent' evaluates to a true value
hw_qga = img_meta_prop.get('hw_qemu_guest_agent', '')
if strutils.bool_from_string(hw_qga):
LOG.debug("Qemu guest agent is enabled through image "
"metadata", instance=instance)
qga_enabled = True
if qga_enabled:
self._add_qga_device(guest, instance)
rng_is_virtio = img_meta_prop.get('hw_rng_model') == 'virtio'
rng_allowed_str = flavor.extra_specs.get('hw_rng:allowed', '')
rng_allowed = strutils.bool_from_string(rng_allowed_str)
if rng_is_virtio and rng_allowed:
self._add_rng_device(guest, flavor)
def _get_guest_memory_backing_config(self, inst_topology, numatune):
wantsmempages = False
if inst_topology:
for cell in inst_topology.cells:
if cell.pagesize:
wantsmempages = True
if not wantsmempages:
return
if not self._has_hugepage_support():
# We should not get here, since we should have avoided
# reporting NUMA topology from _get_host_numa_topology
# in the first place. Just in case of a scheduler
# mess up though, raise an exception
raise exception.MemoryPagesUnsupported()
host_topology = self._get_host_numa_topology()
if host_topology is None:
# As above, we should not get here but just in case...
raise exception.MemoryPagesUnsupported()
# Currently libvirt does not support setting the smallest
# pagesize as the backing memory page size.
# https://bugzilla.redhat.com/show_bug.cgi?id=1173507
avail_pagesize = [page.size_kb
for page in host_topology.cells[0].mempages]
avail_pagesize.sort()
smallest = avail_pagesize[0]
pages = []
for guest_cellid, inst_cell in enumerate(inst_topology.cells):
if inst_cell.pagesize and inst_cell.pagesize > smallest:
for memnode in numatune.memnodes:
if guest_cellid == memnode.cellid:
page = (
vconfig.LibvirtConfigGuestMemoryBackingPage())
page.nodeset = [guest_cellid]
page.size_kb = inst_cell.pagesize
pages.append(page)
break # Quit early...
if pages:
membacking = vconfig.LibvirtConfigGuestMemoryBacking()
membacking.hugepages = pages
return membacking
def _get_flavor(self, ctxt, instance, flavor):
if flavor is not None:
return flavor
return instance.flavor
def _configure_guest_by_virt_type(self, guest, virt_type, caps, instance,
image_meta, flavor, root_device_name):
if virt_type == "xen":
if guest.os_type == vm_mode.HVM:
guest.os_loader = CONF.libvirt.xen_hvmloader_path
elif virt_type in ("kvm", "qemu"):
if caps.host.cpu.arch in (arch.I686, arch.X86_64):
guest.sysinfo = self._get_guest_config_sysinfo(instance)
guest.os_smbios = vconfig.LibvirtConfigGuestSMBIOS()
guest.os_mach_type = self._get_machine_type(image_meta, caps)
guest.os_bootmenu = strutils.bool_from_string(
flavor.extra_specs.get(
'hw:boot_menu', image_meta.get('properties', {}).get(
'hw_boot_menu', 'no')))
elif virt_type == "lxc":
guest.os_init_path = "/sbin/init"
guest.os_cmdline = CONSOLE
elif virt_type == "uml":
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name
elif virt_type == "parallels":
if guest.os_type == vm_mode.EXE:
guest.os_init_path = "/sbin/init"
def _conf_non_lxc_uml(self, virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info):
if rescue:
self._set_guest_for_rescue(rescue, guest, inst_path, virt_type,
root_device_name)
elif instance.kernel_id:
self._set_guest_for_inst_kernel(instance, guest, inst_path,
virt_type, root_device_name,
image_meta)
else:
guest.os_boot_dev = blockinfo.get_boot_order(disk_info)
def _create_consoles(self, virt_type, guest, instance, flavor, image_meta,
caps):
if virt_type in ("qemu", "kvm"):
# Create the serial console char devices
self._create_serial_console_devices(guest, instance, flavor,
image_meta)
if caps.host.cpu.arch in (arch.S390, arch.S390X):
consolepty = vconfig.LibvirtConfigGuestConsole()
consolepty.target_type = "sclp"
else:
consolepty = vconfig.LibvirtConfigGuestSerial()
else:
consolepty = vconfig.LibvirtConfigGuestConsole()
return consolepty
def _cpu_config_to_vcpu_model(self, cpu_config, vcpu_model):
"""Update VirtCPUModel object according to libvirt CPU config.
:param cpu_config: vconfig.LibvirtConfigGuestCPU representing the
instance's virtual cpu configuration.
:param vcpu_model: VirtCPUModel object. A new object will be created
if None.
:return: Updated VirtCPUModel object, or None if cpu_config is None
"""
if not cpu_config:
return
if not vcpu_model:
vcpu_model = objects.VirtCPUModel()
vcpu_model.arch = cpu_config.arch
vcpu_model.vendor = cpu_config.vendor
vcpu_model.model = cpu_config.model
vcpu_model.mode = cpu_config.mode
vcpu_model.match = cpu_config.match
if cpu_config.sockets:
vcpu_model.topology = objects.VirtCPUTopology(
sockets=cpu_config.sockets,
cores=cpu_config.cores,
threads=cpu_config.threads)
else:
vcpu_model.topology = None
features = [objects.VirtCPUFeature(
name=f.name,
policy=f.policy) for f in cpu_config.features]
vcpu_model.features = features
return vcpu_model
def _vcpu_model_to_cpu_config(self, vcpu_model):
"""Create libvirt CPU config according to VirtCPUModel object.
:param vcpu_model: VirtCPUModel object.
:return: vconfig.LibvirtConfigGuestCPU.
"""
cpu_config = vconfig.LibvirtConfigGuestCPU()
cpu_config.arch = vcpu_model.arch
cpu_config.model = vcpu_model.model
cpu_config.mode = vcpu_model.mode
cpu_config.match = vcpu_model.match
cpu_config.vendor = vcpu_model.vendor
if vcpu_model.topology:
cpu_config.sockets = vcpu_model.topology.sockets
cpu_config.cores = vcpu_model.topology.cores
cpu_config.threads = vcpu_model.topology.threads
if vcpu_model.features:
for f in vcpu_model.features:
xf = vconfig.LibvirtConfigGuestCPUFeature()
xf.name = f.name
xf.policy = f.policy
cpu_config.features.add(xf)
return cpu_config
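# A minimal sketch of how the two helpers above complement each other
# (hypothetical usage, not lifted from any caller):
#
#   vcpu_model = self._cpu_config_to_vcpu_model(guest.cpu, None)
#   cpu_config = self._vcpu_model_to_cpu_config(vcpu_model)
#
# The reconstructed cpu_config carries the same arch, vendor, model,
# mode, match, topology and feature policies captured from guest.cpu.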
def _get_guest_config(self, instance, network_info, image_meta,
disk_info, rescue=None, block_device_info=None,
context=None):
"""Get config data for parameters.
:param rescue: optional dictionary that should contain the key
'ramdisk_id' if a ramdisk is needed for the rescue image and
'kernel_id' if a kernel is needed for the rescue image.
"""
flavor = instance.flavor
inst_path = libvirt_utils.get_instance_path(instance)
disk_mapping = disk_info['mapping']
img_meta_prop = image_meta.get('properties', {}) if image_meta else {}
virt_type = CONF.libvirt.virt_type
guest = vconfig.LibvirtConfigGuest()
guest.virt_type = virt_type
guest.name = instance.name
guest.uuid = instance.uuid
# We are using default unit for memory: KiB
guest.memory = flavor.memory_mb * units.Ki
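# e.g. a flavor with memory_mb=2048 yields guest.memory = 2097152 KiB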
guest.vcpus = flavor.vcpus
allowed_cpus = hardware.get_vcpu_pin_set()
pci_devs = pci_manager.get_instance_pci_devs(instance, 'all')
guest_numa_config = self._get_guest_numa_config(
instance.numa_topology, flavor, pci_devs, allowed_cpus)
guest.cpuset = guest_numa_config.cpuset
guest.cputune = guest_numa_config.cputune
guest.numatune = guest_numa_config.numatune
guest.membacking = self._get_guest_memory_backing_config(
instance.numa_topology, guest_numa_config.numatune)
guest.metadata.append(self._get_guest_config_meta(context,
instance))
guest.idmaps = self._get_guest_idmaps()
self._update_guest_cputune(guest, flavor, virt_type)
guest.cpu = self._get_guest_cpu_config(
flavor, image_meta, guest_numa_config.numaconfig,
instance.numa_topology)
# Notes(yjiang5): we always sync the instance's vcpu model with
# the corresponding config file.
instance.vcpu_model = self._cpu_config_to_vcpu_model(
guest.cpu, instance.vcpu_model)
if 'root' in disk_mapping:
root_device_name = block_device.prepend_dev(
disk_mapping['root']['dev'])
else:
root_device_name = None
if root_device_name:
# NOTE(yamahata):
# for nova.api.ec2.cloud.CloudController.get_metadata()
instance.root_device_name = root_device_name
guest.os_type = (vm_mode.get_from_instance(instance) or
self._get_guest_os_type(virt_type))
caps = self._host.get_capabilities()
self._configure_guest_by_virt_type(guest, virt_type, caps, instance,
image_meta, flavor,
root_device_name)
if virt_type not in ('lxc', 'uml'):
self._conf_non_lxc_uml(virt_type, guest, root_device_name, rescue,
instance, inst_path, image_meta, disk_info)
self._set_features(guest, instance.os_type, caps, virt_type)
self._set_clock(guest, instance.os_type, image_meta, virt_type)
storage_configs = self._get_guest_storage_config(
instance, image_meta, disk_info, rescue, block_device_info,
flavor, guest.os_type)
for config in storage_configs:
guest.add_device(config)
for vif in network_info:
config = self.vif_driver.get_config(
instance, vif, image_meta,
flavor, virt_type)
guest.add_device(config)
consolepty = self._create_consoles(virt_type, guest, instance, flavor,
image_meta, caps)
if virt_type != 'parallels':
consolepty.type = "pty"
guest.add_device(consolepty)
# We want a tablet if VNC is enabled, or SPICE is enabled and
# the SPICE agent is disabled. If the SPICE agent is enabled
# it provides a paravirt mouse which drastically reduces
# overhead (by eliminating USB polling).
#
# NB: this implies that if both SPICE + VNC are enabled
# at the same time, we'll get the tablet whether the
# SPICE agent is used or not.
need_usb_tablet = False
if CONF.vnc_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
elif CONF.spice.enabled and not CONF.spice.agent_enabled:
need_usb_tablet = CONF.libvirt.use_usb_tablet
if need_usb_tablet and guest.os_type == vm_mode.HVM:
tablet = vconfig.LibvirtConfigGuestInput()
tablet.type = "tablet"
tablet.bus = "usb"
guest.add_device(tablet)
if (CONF.spice.enabled and CONF.spice.agent_enabled and
virt_type not in ('lxc', 'uml', 'xen')):
channel = vconfig.LibvirtConfigGuestChannel()
channel.target_name = "com.redhat.spice.0"
guest.add_device(channel)
# NB some versions of libvirt support both SPICE and VNC
# at the same time. We're not trying to second guess which
# those versions are. We'll just let libvirt report the
# errors appropriately if the user enables both.
add_video_driver = False
if ((CONF.vnc_enabled and
virt_type not in ('lxc', 'uml'))):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "vnc"
graphics.keymap = CONF.vnc_keymap
graphics.listen = CONF.vncserver_listen
guest.add_device(graphics)
add_video_driver = True
if (CONF.spice.enabled and
virt_type not in ('lxc', 'uml', 'xen')):
graphics = vconfig.LibvirtConfigGuestGraphics()
graphics.type = "spice"
graphics.keymap = CONF.spice.keymap
graphics.listen = CONF.spice.server_listen
guest.add_device(graphics)
add_video_driver = True
if add_video_driver:
self._add_video_driver(guest, image_meta, img_meta_prop, flavor)
# Qemu guest agent only support 'qemu' and 'kvm' hypervisor
if virt_type in ('qemu', 'kvm'):
self._set_qemu_guest_agent(guest, flavor, instance, img_meta_prop)
if virt_type in ('xen', 'qemu', 'kvm'):
for pci_dev in pci_manager.get_instance_pci_devs(instance):
guest.add_device(self._get_guest_pci_device(pci_dev))
else:
if len(pci_devs) > 0:
raise exception.PciDeviceUnsupportedHypervisor(
type=virt_type)
if 'hw_watchdog_action' in flavor.extra_specs:
LOG.warn(_LW('Old property name "hw_watchdog_action" is now '
'deprecated and will be removed in the next release. '
'Use updated property name '
'"hw:watchdog_action" instead'))
# TODO(pkholkin): accepting old property name 'hw_watchdog_action'
# should be removed in the next release
watchdog_action = (flavor.extra_specs.get('hw_watchdog_action') or
flavor.extra_specs.get('hw:watchdog_action')
or 'disabled')
if (image_meta is not None and
image_meta.get('properties', {}).get('hw_watchdog_action')):
watchdog_action = image_meta['properties']['hw_watchdog_action']
# NB(sross): currently only actually supported by KVM/QEMU
if watchdog_action != 'disabled':
if watchdog_actions.is_valid_watchdog_action(watchdog_action):
bark = vconfig.LibvirtConfigGuestWatchdog()
bark.action = watchdog_action
guest.add_device(bark)
else:
raise exception.InvalidWatchdogAction(action=watchdog_action)
# Memory balloon device only support 'qemu/kvm' and 'xen' hypervisor
if (virt_type in ('xen', 'qemu', 'kvm') and
CONF.libvirt.mem_stats_period_seconds > 0):
balloon = vconfig.LibvirtConfigMemoryBalloon()
if virt_type in ('qemu', 'kvm'):
balloon.model = 'virtio'
else:
balloon.model = 'xen'
balloon.period = CONF.libvirt.mem_stats_period_seconds
guest.add_device(balloon)
return guest
def _get_guest_xml(self, context, instance, network_info, disk_info,
image_meta, rescue=None,
block_device_info=None, write_to_disk=False):
# NOTE(danms): Stringifying a NetworkInfo will take a lock. Do
# this ahead of time so that we don't acquire it while also
# holding the logging lock.
network_info_str = str(network_info)
msg = ('Start _get_guest_xml '
'network_info=%(network_info)s '
'disk_info=%(disk_info)s '
'image_meta=%(image_meta)s rescue=%(rescue)s '
'block_device_info=%(block_device_info)s' %
{'network_info': network_info_str, 'disk_info': disk_info,
'image_meta': image_meta, 'rescue': rescue,
'block_device_info': block_device_info})
# NOTE(mriedem): block_device_info can contain auth_password so we
# need to sanitize the password in the message.
LOG.debug(strutils.mask_password(msg), instance=instance)
conf = self._get_guest_config(instance, network_info, image_meta,
disk_info, rescue, block_device_info,
context)
xml = conf.to_xml()
if write_to_disk:
instance_dir = libvirt_utils.get_instance_path(instance)
xml_path = os.path.join(instance_dir, 'libvirt.xml')
libvirt_utils.write_to_file(xml_path, xml)
LOG.debug('End _get_guest_xml xml=%(xml)s',
{'xml': xml}, instance=instance)
return xml
def get_info(self, instance):
"""Retrieve information from libvirt for a specific instance name.
If a libvirt error is encountered during lookup, we might raise a
NotFound exception or Error exception depending on how severe the
libvirt error is.
"""
virt_dom = self._host.get_domain(instance)
try:
dom_info = self._host.get_domain_info(virt_dom)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.name)
msg = (_('Error from libvirt while getting domain info for '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
return hardware.InstanceInfo(state=LIBVIRT_POWER_STATE[dom_info[0]],
max_mem_kb=dom_info[1],
mem_kb=dom_info[2],
num_cpu=dom_info[3],
cpu_time_ns=dom_info[4],
id=virt_dom.ID())
def _create_domain_setup_lxc(self, instance, image_meta,
block_device_info, disk_info):
inst_path = libvirt_utils.get_instance_path(instance)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
disk_info = disk_info or {}
disk_mapping = disk_info.get('mapping', [])
if self._is_booted_from_volume(instance, disk_mapping):
root_disk = block_device.get_root_bdm(block_device_mapping)
disk_path = root_disk['connection_info']['data']['device_path']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, image_meta, root_disk)
self._connect_volume(root_disk['connection_info'], disk_info)
# Get the system metadata from the instance
use_cow = instance.system_metadata['image_disk_format'] == 'qcow2'
else:
image = self.image_backend.image(instance, 'disk')
disk_path = image.path
use_cow = CONF.use_cow_images
container_dir = os.path.join(inst_path, 'rootfs')
fileutils.ensure_tree(container_dir)
rootfs_dev = disk.setup_container(disk_path,
container_dir=container_dir,
use_cow=use_cow)
try:
# Save rootfs device to disconnect it when deleting the instance
if rootfs_dev:
instance.system_metadata['rootfs_device_name'] = rootfs_dev
if CONF.libvirt.uid_maps or CONF.libvirt.gid_maps:
id_maps = self._get_guest_idmaps()
libvirt_utils.chown_for_id_maps(container_dir, id_maps)
except Exception:
with excutils.save_and_reraise_exception():
self._create_domain_cleanup_lxc(instance)
def _create_domain_cleanup_lxc(self, instance):
inst_path = libvirt_utils.get_instance_path(instance)
container_dir = os.path.join(inst_path, 'rootfs')
try:
state = self.get_info(instance).state
except exception.InstanceNotFound:
# The domain may not be present if the instance failed to start
state = None
if state == power_state.RUNNING:
# NOTE(uni): Now the container is running with its own private
# mount namespace and so there is no need to keep the container
# rootfs mounted in the host namespace
disk.clean_lxc_namespace(container_dir=container_dir)
else:
disk.teardown_container(container_dir=container_dir)
@contextlib.contextmanager
def _lxc_disk_handler(self, instance, image_meta,
block_device_info, disk_info):
"""Context manager to handle the pre and post instance boot,
LXC specific disk operations.
An image or a volume path will be prepared and setup to be
used by the container, prior to starting it.
The disk will be disconnected and unmounted if a container has
failed to start.
"""
if CONF.libvirt.virt_type != 'lxc':
yield
return
self._create_domain_setup_lxc(instance, image_meta,
block_device_info, disk_info)
try:
yield
finally:
self._create_domain_cleanup_lxc(instance)
def _create_domain(self, xml=None, domain=None,
instance=None, launch_flags=0, power_on=True):
"""Create a domain.
Either domain or xml must be passed in. If both are passed, then
the domain definition is overwritten from the xml.
"""
err = None
try:
if xml:
err = _LE('Error defining a domain with XML: %s') % xml
domain = self._conn.defineXML(xml)
if power_on:
err = _LE('Error launching a defined domain with XML: %s') \
% encodeutils.safe_decode(domain.XMLDesc(0),
errors='ignore')
domain.createWithFlags(launch_flags)
if not utils.is_neutron():
err = _LE('Error enabling hairpin mode with XML: %s') \
% encodeutils.safe_decode(domain.XMLDesc(0),
errors='ignore')
self._enable_hairpin(domain.XMLDesc(0))
except Exception:
with excutils.save_and_reraise_exception():
if err:
LOG.error(err)
return domain
def _neutron_failed_callback(self, event_name, instance):
LOG.error(_LE('Neutron Reported failure on event '
'%(event)s for instance %(uuid)s'),
{'event': event_name, 'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
# NOTE(danms): We need to collect any VIFs that are currently
# down that we expect a down->up event for. Anything that is
# already up will not undergo that transition, and for
# anything that might be stale (cache-wise) assume it's
# already up so we don't block on it.
return [('network-vif-plugged', vif['id'])
for vif in network_info if vif.get('active', True) is False]
def _create_domain_and_network(self, context, xml, instance, network_info,
disk_info, block_device_info=None,
power_on=True, reboot=False,
vifs_already_plugged=False):
"""Do required network setup and create domain."""
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
for vol in block_device_mapping:
connection_info = vol['connection_info']
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
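# Start the domain paused while we expect Neutron vif-plugged events;
# it is resumed at the end of this method once network setup is done.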
launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
domain = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
with self._lxc_disk_handler(instance, image_meta,
block_device_info, disk_info):
domain = self._create_domain(
xml, instance=instance,
launch_flags=launch_flags,
power_on=power_on)
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
# Neutron reported failure and we didn't swallow it, so
# bail here
with excutils.save_and_reraise_exception():
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warn(_LW('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
# Resume only if domain has been paused
if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:
domain.resume()
return domain
def _get_all_block_devices(self):
"""Return all block devices in use on this node."""
devices = []
for dom in self._host.list_instance_domains():
try:
doc = etree.fromstring(dom.XMLDesc(0))
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the XML from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
except Exception:
continue
sources = doc.findall("./devices/disk[@type='block']/source")
for source in sources:
devices.append(source.get('dev'))
return devices
def _get_interfaces(self, xml):
"""Note that this function takes a domain xml.
Returns a list of all network interfaces for this instance.
"""
doc = None
try:
doc = etree.fromstring(xml)
except Exception:
return []
interfaces = []
nodes = doc.findall('./devices/interface/target')
for target in nodes:
interfaces.append(target.get('dev'))
return interfaces
def _get_vcpu_total(self):
"""Get available vcpu number of physical computer.
:returns: the number of CPU cores that instances can use.
"""
if self._vcpu_total != 0:
return self._vcpu_total
try:
total_pcpus = self._conn.getInfo()[2]
except libvirt.libvirtError:
LOG.warn(_LW("Cannot get the number of cpu, because this "
"function is not implemented for this platform. "))
return 0
if CONF.vcpu_pin_set is None:
self._vcpu_total = total_pcpus
return self._vcpu_total
available_ids = hardware.get_vcpu_pin_set()
if sorted(available_ids)[-1] >= total_pcpus:
raise exception.Invalid(_("Invalid vcpu_pin_set config, "
"out of hypervisor cpu range."))
self._vcpu_total = len(available_ids)
return self._vcpu_total
def _get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._conn.getInfo()[1]
@staticmethod
def _get_local_gb_info():
"""Get local storage info of the compute node in GB.
:returns: A dict containing:
:total: How big the overall usable filesystem is (in gigabytes)
:free: How much space is free (in gigabytes)
:used: How much space is used (in gigabytes)
"""
if CONF.libvirt.images_type == 'lvm':
info = lvm.get_volume_group_info(
CONF.libvirt.images_volume_group)
elif CONF.libvirt.images_type == 'rbd':
info = LibvirtDriver._get_rbd_driver().get_pool_info()
else:
info = libvirt_utils.get_fs_info(CONF.instances_path)
for (k, v) in info.iteritems():
info[k] = v / units.Gi
return info
def _get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
:returns: The total number of vcpu(s) that are currently being used.
"""
total = 0
if CONF.libvirt.virt_type == 'lxc':
return total + 1
for dom in self._host.list_instance_domains():
try:
vcpus = dom.vcpus()
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the vpu count from domain id:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
else:
if vcpus is not None and len(vcpus) > 1:
total += len(vcpus[1])
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return total
def _get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
return 0
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
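# The value following each label in /proc/meminfo is expressed in kB.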
if CONF.libvirt.virt_type == 'xen':
used = 0
for dom in self._host.list_instance_domains(only_guests=False):
try:
dom_mem = int(self._host.get_domain_info(dom)[2])
except libvirt.libvirtError as e:
LOG.warn(_LW("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s") %
{"uuid": dom.UUIDString(), "ex": e})
continue
# skip dom0
if dom.ID() != 0:
used += dom_mem
else:
# the memory reported by dom0 is greater than what it
# is actually using
used += (dom_mem -
(int(m[idx1 + 1]) +
int(m[idx2 + 1]) +
int(m[idx3 + 1])))
# Convert it to MB
return used / units.Ki
else:
avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
# Convert it to MB
return self._get_memory_mb_total() - avail / units.Ki
def _get_instance_capabilities(self):
"""Get hypervisor instance capabilities
Returns a list of tuples that describe instances the
hypervisor is capable of hosting. Each tuple consists
of the triplet (arch, hypervisor_type, vm_mode).
:returns: List of tuples describing instance capabilities
"""
caps = self._host.get_capabilities()
instance_caps = list()
for g in caps.guests:
for dt in g.domtype:
instance_cap = (
arch.canonicalize(g.arch),
hv_type.canonicalize(dt),
vm_mode.canonicalize(g.ostype))
instance_caps.append(instance_cap)
return instance_caps
def _get_cpu_info(self):
"""Get cpuinfo information.
Obtains cpu feature from virConnect.getCapabilities.
:return: see above description
"""
caps = self._host.get_capabilities()
cpu_info = dict()
cpu_info['arch'] = caps.host.cpu.arch
cpu_info['model'] = caps.host.cpu.model
cpu_info['vendor'] = caps.host.cpu.vendor
topology = dict()
topology['sockets'] = caps.host.cpu.sockets
topology['cores'] = caps.host.cpu.cores
topology['threads'] = caps.host.cpu.threads
cpu_info['topology'] = topology
features = set()
for f in caps.host.cpu.features:
features.add(f.name)
cpu_info['features'] = features
return cpu_info
def _get_pcidev_info(self, devname):
"""Returns a dict of PCI device."""
def _get_device_type(cfgdev):
"""Get a PCI device's device type.
An assignable PCI device can be a normal PCI device,
a SR-IOV Physical Function (PF), or a SR-IOV Virtual
Function (VF). Only normal PCI devices or SR-IOV VFs
are assignable, while SR-IOV PFs are always owned by
hypervisor.
Note that a PCI device with SR-IOV capability that is
not enabled is reported as a normal PCI device.
"""
for fun_cap in cfgdev.pci_capability.fun_capability:
if len(fun_cap.device_addrs) != 0:
if fun_cap.type == 'virt_functions':
return {'dev_type': 'type-PF'}
if fun_cap.type == 'phys_function':
phys_address = "%04x:%02x:%02x.%01x" % (
fun_cap.device_addrs[0][0],
fun_cap.device_addrs[0][1],
fun_cap.device_addrs[0][2],
fun_cap.device_addrs[0][3])
return {'dev_type': 'type-VF',
'phys_function': phys_address}
return {'dev_type': 'type-PCI'}
virtdev = self._conn.nodeDeviceLookupByName(devname)
xmlstr = virtdev.XMLDesc(0)
cfgdev = vconfig.LibvirtConfigNodeDevice()
cfgdev.parse_str(xmlstr)
address = "%04x:%02x:%02x.%1x" % (
cfgdev.pci_capability.domain,
cfgdev.pci_capability.bus,
cfgdev.pci_capability.slot,
cfgdev.pci_capability.function)
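# e.g. domain=0x0, bus=0x3, slot=0x10, function=0x1 renders as
# '0000:03:10.1'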
device = {
"dev_id": cfgdev.name,
"address": address,
"product_id": "%04x" % cfgdev.pci_capability.product_id,
"vendor_id": "%04x" % cfgdev.pci_capability.vendor_id,
}
device["numa_node"] = cfgdev.pci_capability.numa_node
# requirement by DataBase Model
device['label'] = 'label_%(vendor_id)s_%(product_id)s' % device
device.update(_get_device_type(cfgdev))
return device
def _get_pci_passthrough_devices(self):
"""Get host PCI devices information.
Obtains pci devices information from libvirt, and returns
as a JSON string.
Each device information is a dictionary, with mandatory keys
of 'address', 'vendor_id', 'product_id', 'dev_type', 'dev_id',
'label' and other optional device specific information.
Refer to the objects/pci_device.py for more idea of these keys.
:returns: a JSON string containing a list of the assignable PCI
devices information
"""
# Bail early if we know we can't support `listDevices` to avoid
# repeated warnings within a periodic task
if not getattr(self, '_list_devices_supported', True):
return jsonutils.dumps([])
try:
dev_names = self._conn.listDevices('pci', 0) or []
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
self._list_devices_supported = False
LOG.warn(_LW("URI %(uri)s does not support "
"listDevices: " "%(error)s"),
{'uri': self.uri(), 'error': ex})
return jsonutils.dumps([])
else:
raise
pci_info = []
for name in dev_names:
pci_info.append(self._get_pcidev_info(name))
return jsonutils.dumps(pci_info)
def _has_numa_support(self):
if not self._host.has_min_version(MIN_LIBVIRT_NUMA_VERSION):
return False
if CONF.libvirt.virt_type not in ['qemu', 'kvm']:
return False
return True
def _has_hugepage_support(self):
if not self._host.has_min_version(MIN_LIBVIRT_HUGEPAGE_VERSION):
return False
if CONF.libvirt.virt_type not in ['qemu', 'kvm']:
return False
return True
def _get_host_numa_topology(self):
if not self._has_numa_support():
return
caps = self._host.get_capabilities()
topology = caps.host.topology
if topology is None or not topology.cells:
return
cells = []
allowed_cpus = hardware.get_vcpu_pin_set()
online_cpus = self._host.get_online_cpus()
if allowed_cpus:
allowed_cpus &= online_cpus
else:
allowed_cpus = online_cpus
for cell in topology.cells:
cpuset = set(cpu.id for cpu in cell.cpus)
siblings = sorted(map(set,
set(tuple(cpu.siblings)
if cpu.siblings else ()
for cpu in cell.cpus)
))
cpuset &= allowed_cpus
siblings = [sib & allowed_cpus for sib in siblings]
# Filter out singles and empty sibling sets that may be left
siblings = [sib for sib in siblings if len(sib) > 1]
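# e.g. on a hyperthreaded host a cell may report sibling sets such as
# {0, 4} and {1, 5}; after intersecting with allowed_cpus any set left
# with a single CPU (or none) is dropped, keeping only real thread
# pairs.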
mempages = []
if self._has_hugepage_support():
mempages = [
objects.NUMAPagesTopology(
size_kb=pages.size,
total=pages.total,
used=0)
for pages in cell.mempages]
cell = objects.NUMACell(id=cell.id, cpuset=cpuset,
memory=cell.memory / units.Ki,
cpu_usage=0, memory_usage=0,
siblings=siblings,
pinned_cpus=set([]),
mempages=mempages)
cells.append(cell)
return objects.NUMATopology(cells=cells)
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host.
"""
vol_usage = []
for instance_bdms in compute_host_bdms:
instance = instance_bdms['instance']
for bdm in instance_bdms['instance_bdms']:
vol_stats = []
mountpoint = bdm['device_name']
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
volume_id = bdm['volume_id']
LOG.debug("Trying to get stats for the volume %s",
volume_id)
vol_stats = self.block_stats(instance, mountpoint)
if vol_stats:
stats = dict(volume=volume_id,
instance=instance,
rd_req=vol_stats[0],
rd_bytes=vol_stats[1],
wr_req=vol_stats[2],
wr_bytes=vol_stats[3])
LOG.debug(
"Got volume usage stats for the volume=%(volume)s,"
" rd_req=%(rd_req)d, rd_bytes=%(rd_bytes)d, "
"wr_req=%(wr_req)d, wr_bytes=%(wr_bytes)d",
stats, instance=instance)
vol_usage.append(stats)
return vol_usage
def block_stats(self, instance, disk_id):
"""Note that this function takes an instance name."""
try:
domain = self._host.get_domain(instance)
return domain.blockStats(disk_id)
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.info(_LI('Getting block stats failed, device might have '
'been detached. Instance=%(instance_name)s '
'Disk=%(disk)s Code=%(errcode)s Error=%(e)s'),
{'instance_name': instance.name, 'disk': disk_id,
'errcode': errcode, 'e': e})
except exception.InstanceNotFound:
LOG.info(_LI('Could not find domain in libvirt for instance %s. '
'Cannot get block stats for device'), instance.name)
def get_console_pool_info(self, console_type):
# TODO(mdragon): console proxy should be implemented for libvirt,
# in case someone wants to use it with kvm or
# such. For now return fake data.
return {'address': '127.0.0.1',
'username': 'fakeuser',
'password': 'fakepassword'}
def refresh_security_group_rules(self, security_group_id):
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename: will be put in PCI device
:returns: dictionary containing resource info
"""
disk_info_dict = self._get_local_gb_info()
data = {}
# NOTE(dprince): calling capabilities before getVersion works around
# an initialization issue with some versions of Libvirt (1.0.5.5).
# See: https://bugzilla.redhat.com/show_bug.cgi?id=1000116
# See: https://bugs.launchpad.net/nova/+bug/1215593
# Temporarily convert supported_instances into a string, while keeping
# the RPC version as JSON. Can be changed when RPC broadcast is removed
data["supported_instances"] = jsonutils.dumps(
self._get_instance_capabilities())
data["vcpus"] = self._get_vcpu_total()
data["memory_mb"] = self._get_memory_mb_total()
data["local_gb"] = disk_info_dict['total']
data["vcpus_used"] = self._get_vcpu_used()
data["memory_mb_used"] = self._get_memory_mb_used()
data["local_gb_used"] = disk_info_dict['used']
data["hypervisor_type"] = self._host.get_driver_type()
data["hypervisor_version"] = self._host.get_version()
data["hypervisor_hostname"] = self._host.get_hostname()
# TODO(berrange): why do we bother converting the
# libvirt capabilities XML into a special JSON format ?
# The data format is different across all the drivers
# so we could just return the raw capabilities XML
# which 'compare_cpu' could use directly
#
# That said, arch_filter.py now seems to rely on
# the libvirt drivers format which suggests this
# data format needs to be standardized across drivers
data["cpu_info"] = jsonutils.dumps(self._get_cpu_info())
disk_free_gb = disk_info_dict['free']
disk_over_committed = self._get_disk_over_committed_size_total()
available_least = disk_free_gb * units.Gi - disk_over_committed
data['disk_available_least'] = available_least / units.Gi
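# e.g. 100 GiB free with 20 GiB already over-committed gives
# disk_available_least = 80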
data['pci_passthrough_devices'] = \
self._get_pci_passthrough_devices()
numa_topology = self._get_host_numa_topology()
if numa_topology:
data['numa_topology'] = numa_topology._to_json()
else:
data['numa_topology'] = None
return data
def check_instance_shared_storage_local(self, context, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.objects.instance.Instance object
:returns:
:tempfile: A dict containing the tempfile info on the destination
host
:None: 1. If the instance path does not exist.
2. If the image backend is a shared block storage type.
"""
if self.image_backend.backend().is_shared_block_storage():
return None
dirpath = libvirt_utils.get_instance_path(instance)
if not os.path.exists(dirpath):
return None
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to verify with other "
"compute node that the instance is on "
"the same shared storage.",
tmp_file, instance=instance)
os.close(fd)
return {"filename": tmp_file}
def check_instance_shared_storage_remote(self, context, data):
return os.path.exists(data['filename'])
def check_instance_shared_storage_cleanup(self, context, data):
fileutils.delete_if_exists(data["filename"])
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing:
:filename: name of the tmpfile under CONF.instances_path
:block_migration: whether this is block migration
:disk_over_commit: disk-over-commit factor on dest host
:disk_available_mb: available disk space on dest host
"""
disk_available_mb = None
if block_migration:
disk_available_gb = dst_compute_info['disk_available_least']
disk_available_mb = \
(disk_available_gb * units.Ki) - CONF.reserved_host_disk_mb
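# disk_available_least is reported in GB, so it is multiplied by 1024
# (units.Ki) to get MB before subtracting the reserved headroom.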
# Compare CPU
if not instance.vcpu_model or not instance.vcpu_model.model:
source_cpu_info = src_compute_info['cpu_info']
self._compare_cpu(None, source_cpu_info)
else:
self._compare_cpu(instance.vcpu_model, None)
# Create file on storage, to be checked on source host
filename = self._create_shared_storage_test_file()
return {"filename": filename,
"image_type": CONF.libvirt.images_type,
"block_migration": block_migration,
"disk_over_commit": disk_over_commit,
"disk_available_mb": disk_available_mb}
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
"""
filename = dest_check_data["filename"]
self._cleanup_shared_storage_test_file(filename)
def check_can_live_migrate_source(self, context, instance,
dest_check_data,
block_device_info=None):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:param block_device_info: result of _get_instance_block_device_info
:returns: a dict containing migration info
"""
# Checking shared storage connectivity
# if block migration, instances_paths should not be on shared storage.
source = CONF.host
dest_check_data.update({'is_shared_instance_path':
self._check_shared_storage_test_file(
dest_check_data['filename'])})
dest_check_data.update({'is_shared_block_storage':
self._is_shared_block_storage(instance, dest_check_data,
block_device_info)})
if dest_check_data['block_migration']:
if (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Block migration can not be used "
"with shared storage.")
raise exception.InvalidLocalStorage(reason=reason, path=source)
self._assert_dest_node_has_enough_disk(context, instance,
dest_check_data['disk_available_mb'],
dest_check_data['disk_over_commit'],
block_device_info)
elif not (dest_check_data['is_shared_block_storage'] or
dest_check_data['is_shared_instance_path']):
reason = _("Live migration can not be used "
"without shared storage.")
raise exception.InvalidSharedStorage(reason=reason, path=source)
# NOTE(mikal): include the instance directory name here because it
# doesn't yet exist on the destination but we want to force that
# same name to be used
instance_path = libvirt_utils.get_instance_path(instance,
relative=True)
dest_check_data['instance_relative_path'] = instance_path
# NOTE(danms): Emulate this old flag in case we're talking to
# an older client (<= Juno). We can remove this when we bump the
# compute RPC API to 4.0.
dest_check_data['is_shared_storage'] = (
dest_check_data['is_shared_instance_path'])
return dest_check_data
def _is_shared_block_storage(self, instance, dest_check_data,
block_device_info=None):
"""Check if all block storage of an instance can be shared
between source and destination of a live migration.
Returns true if the instance is volume backed and has no local disks,
or if the image backend is the same on source and destination and the
backend shares block storage between compute nodes.
:param instance: nova.objects.instance.Instance object
:param dest_check_data: dict with boolean fields image_type,
is_shared_instance_path, and is_volume_backed
"""
if (CONF.libvirt.images_type == dest_check_data.get('image_type') and
self.image_backend.backend().is_shared_block_storage()):
# NOTE(dgenin): currently true only for RBD image backend
return True
if (dest_check_data.get('is_shared_instance_path') and
self.image_backend.backend().is_file_in_instance_path()):
# NOTE(angdraug): file based image backends (Raw, Qcow2)
# place block device files under the instance path
return True
if (dest_check_data.get('is_volume_backed') and
not bool(jsonutils.loads(
self.get_instance_disk_info(instance,
block_device_info)))):
return True
return False
def _assert_dest_node_has_enough_disk(self, context, instance,
available_mb, disk_over_commit,
block_device_info=None):
"""Checks if destination has enough disk for block migration."""
# Libvirt supports the qcow2 disk format, which is usually compressed
# on compute nodes.
# A real (compressed) disk image may be enlarged up to the "virtual
# disk size", which is specified as the maximum disk size.
# (See qemu-img info <path-to-disk>)
# The scheduler considers the destination host to still have enough
# disk space if real disk size < available disk size when
# disk_over_commit is True, otherwise if
# virtual disk size < available disk size.
available = 0
if available_mb:
available = available_mb * units.Mi
ret = self.get_instance_disk_info(instance,
block_device_info=block_device_info)
disk_infos = jsonutils.loads(ret)
necessary = 0
if disk_over_commit:
for info in disk_infos:
necessary += int(info['disk_size'])
else:
for info in disk_infos:
necessary += int(info['virt_disk_size'])
# Check that available disk > necessary disk
if (available - necessary) < 0:
reason = (_('Unable to migrate %(instance_uuid)s: '
'Disk of instance is too large(available'
' on destination host:%(available)s '
'< need:%(necessary)s)') %
{'instance_uuid': instance.uuid,
'available': available,
'necessary': necessary})
raise exception.MigrationPreCheckError(reason=reason)
def _compare_cpu(self, guest_cpu, host_cpu_str):
"""Check the host is compatible with the requested CPU
:param guest_cpu: nova.objects.VirtCPUModel or None
:param host_cpu_str: JSON from _get_cpu_info() method
If the 'guest_cpu' parameter is not None, this will be
validated for migration compatibility with the host.
Otherwise the 'host_cpu_str' JSON string will be used for
validation.
:returns:
None. If the given CPU info is not compatible with this server,
an exception is raised.
"""
# NOTE(berendt): virConnectCompareCPU not working for Xen
if CONF.libvirt.virt_type not in ['qemu', 'kvm']:
return
if guest_cpu is None:
info = jsonutils.loads(host_cpu_str)
LOG.info(_LI('Instance launched has CPU info: %s'), host_cpu_str)
cpu = vconfig.LibvirtConfigCPU()
cpu.arch = info['arch']
cpu.model = info['model']
cpu.vendor = info['vendor']
cpu.sockets = info['topology']['sockets']
cpu.cores = info['topology']['cores']
cpu.threads = info['topology']['threads']
for f in info['features']:
cpu.add_feature(vconfig.LibvirtConfigCPUFeature(f))
else:
cpu = self._vcpu_model_to_cpu_config(guest_cpu)
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
# if an unknown character exists in the xml, libvirt complains
try:
ret = self._conn.compareCPU(cpu.to_xml(), 0)
except libvirt.libvirtError as e:
LOG.error(m, {'ret': e, 'u': u})
raise exception.MigrationPreCheckError(
reason=m % {'ret': e, 'u': u})
if ret <= 0:
LOG.error(m, {'ret': ret, 'u': u})
raise exception.InvalidCPUInfo(reason=m % {'ret': ret, 'u': u})
def _create_shared_storage_test_file(self):
"""Makes tmpfile under CONF.instances_path."""
dirpath = CONF.instances_path
fd, tmp_file = tempfile.mkstemp(dir=dirpath)
LOG.debug("Creating tmpfile %s to notify to other "
"compute nodes that they should mount "
"the same storage.", tmp_file)
os.close(fd)
return os.path.basename(tmp_file)
def _check_shared_storage_test_file(self, filename):
"""Confirms existence of the tmpfile under CONF.instances_path.
Returns False if the tmpfile cannot be confirmed.
"""
tmp_file = os.path.join(CONF.instances_path, filename)
if not os.path.exists(tmp_file):
return False
else:
return True
def _cleanup_shared_storage_test_file(self, filename):
"""Removes existence of the tmpfile under CONF.instances_path."""
tmp_file = os.path.join(CONF.instances_path, filename)
os.remove(tmp_file)
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Ensure that an instance's filtering rules are enabled.
When migrating an instance, we need the filtering rules to
be configured on the destination host before starting the
migration.
Also, when restarting the compute service, we need to ensure
that filtering rules exist for all running services.
"""
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# nwfilters may be defined in a separate thread in the case
# of libvirt non-blocking mode, so we wait for completion
timeout_count = range(CONF.live_migration_retry_count)
while timeout_count:
if self.firewall_driver.instance_filter_exists(instance,
network_info):
break
timeout_count.pop()
if len(timeout_count) == 0:
msg = _('The firewall filter for %s does not exist')
raise exception.NovaException(msg % instance.name)
greenthread.sleep(1)
def filter_defer_apply_on(self):
self.firewall_driver.filter_defer_apply_on()
def filter_defer_apply_off(self):
self.firewall_driver.filter_defer_apply_off()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Spawning live_migration operation for distributing high-load.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
"""
# 'dest' will be substituted into 'migration_uri' so ensure
# it doesn't contain any characters that could be used to
# exploit the URI accepted by libvirt
if not libvirt_utils.is_valid_hostname(dest):
raise exception.InvalidHostname(hostname=dest)
greenthread.spawn(self._live_migration, context, instance, dest,
post_method, recover_method, block_migration,
migrate_data)
def _update_xml(self, xml_str, volume, listen_addrs):
xml_doc = etree.fromstring(xml_str)
if volume:
xml_doc = self._update_volume_xml(xml_doc, volume)
if listen_addrs:
xml_doc = self._update_graphics_xml(xml_doc, listen_addrs)
else:
self._check_graphics_addresses_can_live_migrate(listen_addrs)
return etree.tostring(xml_doc)
def _update_graphics_xml(self, xml_doc, listen_addrs):
# change over listen addresses
for dev in xml_doc.findall('./devices/graphics'):
gr_type = dev.get('type')
listen_tag = dev.find('listen')
if gr_type in ('vnc', 'spice'):
if listen_tag is not None:
listen_tag.set('address', listen_addrs[gr_type])
if dev.get('listen') is not None:
dev.set('listen', listen_addrs[gr_type])
return xml_doc
def _update_volume_xml(self, xml_doc, volume):
"""Update XML using device information of destination host."""
# Update volume xml
parser = etree.XMLParser(remove_blank_text=True)
disk_nodes = xml_doc.findall('./devices/disk')
for pos, disk_dev in enumerate(disk_nodes):
serial_source = disk_dev.findtext('serial')
if serial_source is None or volume.get(serial_source) is None:
continue
if ('connection_info' not in volume[serial_source] or
'disk_info' not in volume[serial_source]):
continue
conf = self._get_volume_config(
volume[serial_source]['connection_info'],
volume[serial_source]['disk_info'])
xml_doc2 = etree.XML(conf.to_xml(), parser)
serial_dest = xml_doc2.findtext('serial')
# Compare source serial and destination serial number.
# If these serial numbers match, continue the process.
if (serial_dest and (serial_source == serial_dest)):
LOG.debug("Find same serial number: pos=%(pos)s, "
"serial=%(num)s",
{'pos': pos, 'num': serial_source})
for cnt, item_src in enumerate(disk_dev):
# If source and destination have same item, update
# the item using destination value.
for item_dst in xml_doc2.findall(item_src.tag):
disk_dev.remove(item_src)
item_dst.tail = None
disk_dev.insert(cnt, item_dst)
# If the destination has additional items, these items should be
# added here.
for item_dst in list(xml_doc2):
item_dst.tail = None
disk_dev.insert(cnt, item_dst)
return xml_doc
def _check_graphics_addresses_can_live_migrate(self, listen_addrs):
LOCAL_ADDRS = ('0.0.0.0', '127.0.0.1', '::', '::1')
local_vnc = CONF.vncserver_listen in LOCAL_ADDRS
local_spice = CONF.spice.server_listen in LOCAL_ADDRS
if ((CONF.vnc_enabled and not local_vnc) or
(CONF.spice.enabled and not local_spice)):
msg = _('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag or your'
' destination node does not support'
' retrieving listen addresses. In order'
' for live migration to work properly, you'
' must configure the graphics (VNC and/or'
' SPICE) listen addresses to be either'
' the catch-all address (0.0.0.0 or ::) or'
' the local address (127.0.0.1 or ::1).')
raise exception.MigrationError(reason=msg)
if listen_addrs is not None:
dest_local_vnc = listen_addrs['vnc'] in LOCAL_ADDRS
dest_local_spice = listen_addrs['spice'] in LOCAL_ADDRS
if ((CONF.vnc_enabled and not dest_local_vnc) or
(CONF.spice.enabled and not dest_local_spice)):
LOG.warn(_LW('Your libvirt version does not support the'
' VIR_DOMAIN_XML_MIGRATABLE flag, and the '
' graphics (VNC and/or SPICE) listen'
' addresses on the destination node do not'
' match the addresses on the source node.'
' Since the source node has listen'
' addresses set to either the catch-all'
' address (0.0.0.0 or ::) or the local'
' address (127.0.0.1 or ::1), the live'
' migration will succeed, but the VM will'
' continue to listen on the current'
' addresses.'))
def _live_migration_operation(self, context, instance, dest,
block_migration, migrate_data, dom):
"""Invoke the live migration operation
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
:param dom: the libvirt domain object
This method is intended to be run in a background thread and will
block that thread until the migration is finished or failed.
"""
try:
if block_migration:
flaglist = CONF.libvirt.block_migration_flag.split(',')
else:
flaglist = CONF.libvirt.live_migration_flag.split(',')
flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
logical_sum = reduce(lambda x, y: x | y, flagvals)
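# The flag names are resolved against the libvirt module and OR'ed
# into a single bitmask for migrateToURI*; e.g. a (hypothetical) flag
# list of 'VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE' yields
# libvirt.VIR_MIGRATE_PEER2PEER | libvirt.VIR_MIGRATE_LIVE.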
pre_live_migrate_data = (migrate_data or {}).get(
'pre_live_migration_result', {})
listen_addrs = pre_live_migrate_data.get('graphics_listen_addrs')
volume = pre_live_migrate_data.get('volume')
migratable_flag = getattr(libvirt, 'VIR_DOMAIN_XML_MIGRATABLE',
None)
if (migratable_flag is None or
(listen_addrs is None and not volume)):
self._check_graphics_addresses_can_live_migrate(listen_addrs)
dom.migrateToURI(CONF.libvirt.live_migration_uri % dest,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
else:
old_xml_str = dom.XMLDesc(migratable_flag)
new_xml_str = self._update_xml(old_xml_str,
volume,
listen_addrs)
try:
dom.migrateToURI2(CONF.libvirt.live_migration_uri % dest,
None,
new_xml_str,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
except libvirt.libvirtError as ex:
# NOTE(mriedem): There is a bug in older versions of
# libvirt where the VIR_DOMAIN_XML_MIGRATABLE flag causes
# virDomainDefCheckABIStability to not compare the source
# and target domain xml's correctly for the CPU model.
# We try to handle that error here and attempt the legacy
# migrateToURI path, which could fail if the console
# addresses are not correct, but in that case we have the
# _check_graphics_addresses_can_live_migrate check in place
# to catch it.
# TODO(mriedem): Remove this workaround when
# Red Hat BZ #1141838 is closed.
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
LOG.warn(_LW('An error occurred trying to live '
'migrate. Falling back to legacy live '
'migrate flow. Error: %s'), ex,
instance=instance)
self._check_graphics_addresses_can_live_migrate(
listen_addrs)
dom.migrateToURI(
CONF.libvirt.live_migration_uri % dest,
logical_sum,
None,
CONF.libvirt.live_migration_bandwidth)
else:
raise
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Live Migration failure: %s"), e,
instance=instance)
# If 'migrateToURI' fails we don't know what state the
# VM instances on each host are in. Possibilities include
#
# 1. src==running, dst==none
#
# Migration failed & rolled back, or never started
#
# 2. src==running, dst==paused
#
# Migration started but is still ongoing
#
# 3. src==paused, dst==paused
#
# Migration data transfer completed, but switchover
# is still ongoing, or failed
#
# 4. src==paused, dst==running
#
# Migration data transfer completed, switchover
# happened but cleanup on source failed
#
# 5. src==none, dst==running
#
# Migration fully succeeded.
#
# Libvirt will aim to complete any migration operation
# or roll it back. So even if the migrateToURI call has
# returned an error, if the migration was not finished
# libvirt should clean up.
#
# So we take the error raised here with a pinch of salt
# and rely on the domain job info status to figure out
# what really happened to the VM, which is a much more
# reliable indicator.
#
# In particular we need to try very hard to ensure that
# Nova does not "forget" about the guest. ie leaving it
# running on a different host to the one recorded in
# the database, as that would be a serious resource leak
LOG.debug("Migration operation thread has finished",
instance=instance)
def _live_migration_monitor(self, context, instance, dest, post_method,
recover_method, block_migration,
migrate_data, dom, finish_event):
n = 0
while True:
info = host.DomainJobInfo.for_domain(dom)
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Annoyingly this could indicate many possible
# states, so we must fix the mess:
#
# 1. Migration has not yet begun
# 2. Migration has stopped due to failure
# 3. Migration has stopped due to completion
#
# We can detect option 1 by seeing if thread is still
# running. We can distinguish 2 vs 3 by seeing if the
# VM still exists & running on the current host
#
if not finish_event.ready():
LOG.debug("Operation thread is still running",
instance=instance)
# Leave type untouched
else:
try:
if dom.isActive():
LOG.debug("VM running on src, migration failed",
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_FAILED
else:
LOG.debug("VM is shutoff, migration finished",
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_COMPLETED
except libvirt.libvirtError as ex:
LOG.debug("Error checking domain status %(ex)s",
ex, instance=instance)
if ex.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
LOG.debug("VM is missing, migration finished",
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_COMPLETED
else:
LOG.info(_LI("Error %(ex)s, migration failed"),
instance=instance)
info.type = libvirt.VIR_DOMAIN_JOB_FAILED
if info.type != libvirt.VIR_DOMAIN_JOB_NONE:
LOG.debug("Fixed incorrect job type to be %d",
info.type, instance=instance)
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Migration is not yet started
LOG.debug("Migration not running yet",
instance=instance)
elif info.type == libvirt.VIR_DOMAIN_JOB_UNBOUNDED:
# We loop every 500ms, so don't log on every
# iteration to avoid spamming logs for long
# running migrations. Just once every 5 secs
# is sufficient for developers to debug problems.
# We log once every 30 seconds at info to help
# admins see slow running migration operations
# when debug logs are off.
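                # n is bumped once per 0.5 sec iteration while the job
                # runs, so (n % 10) == 0 is roughly every 5 secs and
                # (n % 60) == 0 roughly every 30 secs.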
if (n % 10) == 0:
# Ignoring memory_processed, as due to repeated
# dirtying of data, this can be way larger than
# memory_total. Best to just look at what's
# remaining to copy and ignore what's done already
#
# TODO(berrange) perhaps we could include disk
# transfer stats in the progress too, but it
# might make memory info more obscure as large
# disk sizes might dwarf memory size
progress = 0
if info.memory_total != 0:
progress = round(info.memory_remaining *
100 / info.memory_total)
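                    # Worked example: with memory_total=4 GiB and
                    # memory_remaining=1 GiB, progress is 25, so
                    # instance.progress below becomes 75.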
instance.progress = 100 - progress
instance.save()
lg = LOG.debug
if (n % 60) == 0:
lg = LOG.info
lg(_LI("Migration running for %(secs)d secs, "
"memory %(progress)d%% remaining; "
"(bytes processed=%(processed)d, "
"remaining=%(remaining)d, "
"total=%(total)d)"),
{"secs": n / 2, "progress": progress,
"processed": info.memory_processed,
"remaining": info.memory_remaining,
"total": info.memory_total}, instance=instance)
# Migration is still running
#
# This is where we'd wire up calls to change live
# migration status. eg change max downtime, cancel
# the operation, change max bandwidth
n = n + 1
elif info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
# Migration is all done
LOG.info(_LI("Migration operation has completed"),
instance=instance)
post_method(context, instance, dest, block_migration,
migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_FAILED:
# Migration did not succeed
LOG.error(_LE("Migration operation has aborted"),
instance=instance)
recover_method(context, instance, dest, block_migration,
migrate_data)
break
elif info.type == libvirt.VIR_DOMAIN_JOB_CANCELLED:
# Migration was stopped by admin
LOG.warn(_LW("Migration operation was cancelled"),
instance=instance)
recover_method(context, instance, dest, block_migration,
migrate_data)
break
else:
LOG.warn(_LW("Unexpected migration job type: %d"),
info.type, instance=instance)
time.sleep(0.5)
def _live_migration(self, context, instance, dest, post_method,
recover_method, block_migration,
migrate_data):
"""Do live migration.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, do block migration.
:param migrate_data: implementation specific params
This fires off a new thread to run the blocking migration
operation, and then this thread monitors the progress of
migration and controls its operation
"""
dom = self._host.get_domain(instance)
opthread = greenthread.spawn(self._live_migration_operation,
context, instance, dest,
block_migration,
migrate_data, dom)
finish_event = eventlet.event.Event()
def thread_finished(thread, event):
LOG.debug("Migration operation thread notification",
instance=instance)
event.send()
opthread.link(thread_finished, finish_event)
# Let eventlet schedule the new thread right away
time.sleep(0)
try:
LOG.debug("Starting monitoring of live migration",
instance=instance)
self._live_migration_monitor(context, instance, dest,
post_method, recover_method,
block_migration, migrate_data,
dom, finish_event)
except Exception as ex:
LOG.warn(_LW("Error monitoring migration: %(ex)s"),
{"ex": ex}, instance=instance, exc_info=True)
raise
finally:
LOG.debug("Live migration monitoring is all done",
instance=instance)
def _try_fetch_image(self, context, path, image_id, instance,
fallback_from_host=None):
try:
libvirt_utils.fetch_image(context, path,
image_id,
instance.user_id,
instance.project_id)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore on "
"image service, attempting to copy image "
"from %(host)s",
{'image_id': image_id, 'host': fallback_from_host})
libvirt_utils.copy_image(src=path, dest=path,
host=fallback_from_host,
receive=True)
def _fetch_instance_kernel_ramdisk(self, context, instance,
fallback_from_host=None):
"""Download kernel and ramdisk for instance in instance directory."""
instance_dir = libvirt_utils.get_instance_path(instance)
if instance.kernel_id:
self._try_fetch_image(context,
os.path.join(instance_dir, 'kernel'),
instance.kernel_id,
instance, fallback_from_host)
if instance.ramdisk_id:
self._try_fetch_image(context,
os.path.join(instance_dir, 'ramdisk'),
instance.ramdisk_id,
instance, fallback_from_host)
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration."""
try:
self.destroy(context, instance, network_info, block_device_info,
destroy_disks, migrate_data)
finally:
# NOTE(gcb): Failed block live migration may leave instance
# directory at destination node, ensure it is always deleted.
is_shared_instance_path = True
if migrate_data:
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
shutil.rmtree(instance_dir)
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Preparation live migration."""
# Steps for volume backed instance live migration w/o shared storage.
is_shared_block_storage = True
is_shared_instance_path = True
is_block_migration = True
if migrate_data:
is_shared_block_storage = migrate_data.get(
'is_shared_block_storage', True)
is_shared_instance_path = migrate_data.get(
'is_shared_instance_path', True)
is_block_migration = migrate_data.get('block_migration', True)
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
if not (is_shared_instance_path and is_shared_block_storage):
# NOTE(dims): Using config drive with iso format does not work
# because of a bug in libvirt with read only devices. However
# one can use vfat as config_drive_format which works fine.
# Please see bug/1246201 for details on the libvirt bug.
if CONF.config_drive_format != 'vfat':
if configdrive.required_by(instance):
raise exception.NoLiveMigrationForConfigDriveInLibVirt()
if not is_shared_instance_path:
instance_dir = libvirt_utils.get_instance_path_at_destination(
instance, migrate_data)
if os.path.exists(instance_dir):
raise exception.DestinationDiskExists(path=instance_dir)
os.mkdir(instance_dir)
if not is_shared_block_storage:
# Ensure images and backing files are present.
self._create_images_and_backing(
context, instance, instance_dir, disk_info,
fallback_from_host=instance.host)
if not (is_block_migration or is_shared_instance_path):
# NOTE(angdraug): when block storage is shared between source and
# destination and instance path isn't (e.g. volume backed or rbd
# backed instance), instance path on destination has to be prepared
# Touch the console.log file, required by libvirt.
console_file = self._get_console_log_path(instance)
libvirt_utils.file_open(console_file, 'a').close()
        # If the image has a kernel and ramdisk, download them in the
        # normal way.
self._fetch_instance_kernel_ramdisk(context, instance)
# Establishing connection to volume server.
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, image_meta, vol)
self._connect_volume(connection_info, disk_info)
if is_block_migration and len(block_device_mapping):
# NOTE(stpierre): if this instance has mapped volumes,
# we can't do a block migration, since that will
# result in volumes being copied from themselves to
# themselves, which is a recipe for disaster.
LOG.error(
_LE('Cannot block migrate instance %s with mapped volumes'),
instance.uuid)
msg = (_('Cannot block migrate instance %s with mapped volumes') %
instance.uuid)
raise exception.MigrationError(reason=msg)
# We call plug_vifs before the compute manager calls
# ensure_filtering_rules_for_instance, to ensure bridge is set up
        # Retrying is necessary because requests arrive continuously;
        # when concurrent requests hit iptables it complains.
max_retry = CONF.live_migration_retry_count
for cnt in range(max_retry):
try:
self.plug_vifs(instance, network_info)
break
except processutils.ProcessExecutionError:
if cnt == max_retry - 1:
raise
else:
LOG.warn(_LW('plug_vifs() failed %(cnt)d. Retry up to '
'%(max_retry)d.'),
{'cnt': cnt,
'max_retry': max_retry},
instance=instance)
greenthread.sleep(1)
# Store vncserver_listen and latest disk device info
res_data = {'graphics_listen_addrs': {}, 'volume': {}}
res_data['graphics_listen_addrs']['vnc'] = CONF.vncserver_listen
res_data['graphics_listen_addrs']['spice'] = CONF.spice.server_listen
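        # res_data ends up looking roughly like (addresses illustrative):
        # {'graphics_listen_addrs': {'vnc': '0.0.0.0',
        #                            'spice': '127.0.0.1'},
        #  'volume': {'<serial>': {'connection_info': {...},
        #                          'disk_info': {...}}}}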
for vol in block_device_mapping:
connection_info = vol['connection_info']
if connection_info.get('serial'):
serial = connection_info['serial']
res_data['volume'][serial] = {'connection_info': {},
'disk_info': {}}
res_data['volume'][serial]['connection_info'] = \
connection_info
disk_info = blockinfo.get_info_from_bdm(
CONF.libvirt.virt_type, image_meta, vol)
res_data['volume'][serial]['disk_info'] = disk_info
return res_data
def _try_fetch_image_cache(self, image, fetch_func, context, filename,
image_id, instance, size,
fallback_from_host=None):
try:
image.cache(fetch_func=fetch_func,
context=context,
filename=filename,
image_id=image_id,
user_id=instance.user_id,
project_id=instance.project_id,
size=size)
except exception.ImageNotFound:
if not fallback_from_host:
raise
LOG.debug("Image %(image_id)s doesn't exist anymore "
"on image service, attempting to copy "
"image from %(host)s",
{'image_id': image_id, 'host': fallback_from_host})
def copy_from_host(target, max_size):
libvirt_utils.copy_image(src=target,
dest=target,
host=fallback_from_host,
receive=True)
image.cache(fetch_func=copy_from_host,
filename=filename)
def _create_images_and_backing(self, context, instance, instance_dir,
disk_info_json, fallback_from_host=None):
""":param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param instance_dir:
instance path to use, calculated externally to handle block
migrating an instance with an old style instance path
        :param disk_info_json:
            JSON string of disk info as returned by get_instance_disk_info
:param fallback_from_host:
host where we can retrieve images if the glance images are
not available.
"""
if not disk_info_json:
disk_info = []
else:
disk_info = jsonutils.loads(disk_info_json)
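        # Each disk_info entry is a dict as produced by
        # get_instance_disk_info, roughly (values illustrative):
        # {'type': 'qcow2', 'path': '.../instances/<uuid>/disk',
        #  'virt_disk_size': ..., 'backing_file': '<cache name>',
        #  'disk_size': ..., 'over_committed_disk_size': ...}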
for info in disk_info:
base = os.path.basename(info['path'])
# Get image type and create empty disk image, and
# create backing file in case of qcow2.
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file'] and not os.path.exists(instance_disk):
libvirt_utils.create_image(info['type'], instance_disk,
info['virt_disk_size'])
elif info['backing_file']:
# Creating backing file follows same way as spawning instances.
cache_name = os.path.basename(info['backing_file'])
image = self.image_backend.image(instance,
instance_disk,
CONF.libvirt.images_type)
if cache_name.startswith('ephemeral'):
image.cache(fetch_func=self._create_ephemeral,
fs_label=cache_name,
os_type=instance.os_type,
filename=cache_name,
size=info['virt_disk_size'],
ephemeral_size=instance.ephemeral_gb)
elif cache_name.startswith('swap'):
inst_type = instance.get_flavor()
swap_mb = inst_type.swap
image.cache(fetch_func=self._create_swap,
filename="swap_%s" % swap_mb,
size=swap_mb * units.Mi,
swap_mb=swap_mb)
else:
self._try_fetch_image_cache(image,
libvirt_utils.fetch_image,
context, cache_name,
instance.image_ref,
instance,
info['virt_disk_size'],
fallback_from_host)
        # If the image has a kernel and ramdisk, download them in the
        # normal way.
self._fetch_instance_kernel_ramdisk(
context, instance, fallback_from_host=fallback_from_host)
def post_live_migration(self, context, instance, block_device_info,
migrate_data=None):
# Disconnect from volume server
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
self.unplug_vifs(instance, network_info)
def post_live_migration_at_destination(self, context,
instance,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
        # Define the migrated instance, otherwise suspend/destroy does not work.
dom_list = self._conn.listDefinedDomains()
if instance.name not in dom_list:
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
# In case of block migration, destination does not have
# libvirt.xml
disk_info = blockinfo.get_disk_info(
CONF.libvirt.virt_type, instance,
image_meta, block_device_info)
xml = self._get_guest_xml(context, instance,
network_info, disk_info,
image_meta,
block_device_info=block_device_info,
write_to_disk=True)
self._conn.defineXML(xml)
def _get_instance_disk_info(self, instance_name, xml,
block_device_info=None):
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
volume_devices = set()
for vol in block_device_mapping:
disk_dev = vol['mount_device'].rpartition("/")[2]
volume_devices.add(disk_dev)
disk_info = []
doc = etree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
path_nodes = doc.findall('.//devices/disk/source')
driver_nodes = doc.findall('.//devices/disk/driver')
target_nodes = doc.findall('.//devices/disk/target')
for cnt, path_node in enumerate(path_nodes):
disk_type = disk_nodes[cnt].get('type')
path = path_node.get('file') or path_node.get('dev')
target = target_nodes[cnt].attrib['dev']
if not path:
LOG.debug('skipping disk for %s as it does not have a path',
instance_name)
continue
if disk_type not in ['file', 'block']:
                LOG.debug('skipping disk %s because it looks like a volume',
                          path)
continue
if target in volume_devices:
LOG.debug('skipping disk %(path)s (%(target)s) as it is a '
'volume', {'path': path, 'target': target})
continue
# get the real disk size or
# raise a localized error if image is unavailable
if disk_type == 'file':
dk_size = int(os.path.getsize(path))
elif disk_type == 'block':
dk_size = lvm.get_volume_size(path)
disk_type = driver_nodes[cnt].get('type')
if disk_type == "qcow2":
backing_file = libvirt_utils.get_disk_backing_file(path)
virt_size = disk.get_disk_size(path)
over_commit_size = int(virt_size) - dk_size
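                # e.g. a qcow2 with virt_size=20G but only 2G allocated
                # on disk over-commits 18G here.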
else:
backing_file = ""
virt_size = dk_size
over_commit_size = 0
disk_info.append({'type': disk_type,
'path': path,
'virt_disk_size': virt_size,
'backing_file': backing_file,
'disk_size': dk_size,
'over_committed_disk_size': over_commit_size})
return jsonutils.dumps(disk_info)
def get_instance_disk_info(self, instance,
block_device_info=None):
try:
dom = self._host.get_domain(instance)
xml = dom.XMLDesc(0)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
msg = (_('Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] '
'%(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
LOG.warn(msg)
raise exception.InstanceNotFound(instance_id=instance.name)
return self._get_instance_disk_info(instance.name, xml,
block_device_info)
def _get_disk_over_committed_size_total(self):
"""Return total over committed disk size for all instances."""
        # Disk size that all instances use: virtual_size - disk_size
disk_over_committed_size = 0
for dom in self._host.list_instance_domains():
try:
xml = dom.XMLDesc(0)
disk_infos = jsonutils.loads(
self._get_instance_disk_info(dom.name(), xml))
for info in disk_infos:
disk_over_committed_size += int(
info['over_committed_disk_size'])
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
LOG.warn(_LW(
'Error from libvirt while getting description of '
'%(instance_name)s: [Error Code %(error_code)s] %(ex)s'
) % {'instance_name': dom.name(),
'error_code': error_code,
'ex': ex})
except OSError as e:
if e.errno == errno.ENOENT:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but disk file was removed by concurrent '
'operations such as resize.'),
{'i_name': dom.name()})
elif e.errno == errno.EACCES:
LOG.warn(_LW('Periodic task is updating the host stat, '
'it is trying to get disk %(i_name)s, '
'but access is denied. It is most likely '
'due to a VM that exists on the compute '
'node but is not managed by Nova.'),
{'i_name': dom.name()})
else:
raise
except exception.VolumeBDMPathNotFound as e:
LOG.warn(_LW('Periodic task is updating the host stats, '
'it is trying to get disk info for %(i_name)s, '
'but the backing volume block device was removed '
'by concurrent operations such as resize. '
'Error: %(error)s'),
{'i_name': dom.name(),
'error': e})
# NOTE(gtt116): give other tasks a chance.
greenthread.sleep(0)
return disk_over_committed_size
def unfilter_instance(self, instance, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance,
network_info=network_info)
def get_available_nodes(self, refresh=False):
return [self._host.get_hostname()]
def get_host_cpu_stats(self):
"""Return the current CPU state of the host."""
# Extract node's CPU statistics.
stats = self._conn.getCPUStats(libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
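        # getInfo() is expected to return something like (model, memory MB,
        # cpus, MHz, NUMA nodes, sockets, cores, threads), hence index 3.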
stats["frequency"] = self._conn.getInfo()[3]
return stats
def get_host_uptime(self):
"""Returns the result of calling "uptime"."""
out, err = utils.execute('env', 'LANG=C', 'uptime')
return out
def manage_image_cache(self, context, all_instances):
"""Manage the local cache of images."""
self.image_cache_manager.update(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize,
shared_storage=False):
"""Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
utils.execute('mv', inst_base_resize, inst_base)
if not shared_storage:
utils.execute('ssh', dest, 'rm', '-rf', inst_base)
except Exception:
pass
def _is_storage_shared_with(self, dest, inst_base):
# NOTE (rmk): There are two methods of determining whether we are
# on the same filesystem: the source and dest IP are the
# same, or we create a file on the dest system via SSH
# and check whether the source system can also see it.
shared_storage = (dest == self.get_host_ip_addr())
if not shared_storage:
tmp_file = uuid.uuid4().hex + '.tmp'
tmp_path = os.path.join(inst_base, tmp_file)
try:
utils.execute('ssh', dest, 'touch', tmp_path)
if os.path.exists(tmp_path):
shared_storage = True
os.unlink(tmp_path)
else:
utils.execute('ssh', dest, 'rm', tmp_path)
except Exception:
pass
return shared_storage
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
        # get_bdm_ephemeral_disk_size() will return 0 if the new
        # instance's requested block device mapping contains no
        # ephemeral devices. However, we still want to check whether
        # the original instance's ephemeral_gb property was set and
        # ensure that the new requested flavor's ephemeral size is
        # large enough.
eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
instance.ephemeral_gb)
# Checks if the migration needs a disk resize down.
root_down = flavor['root_gb'] < instance.root_gb
ephemeral_down = flavor['ephemeral_gb'] < eph_size
disk_info_text = self.get_instance_disk_info(
instance, block_device_info=block_device_info)
booted_from_volume = self._is_booted_from_volume(instance,
disk_info_text)
if (root_down and not booted_from_volume) or ephemeral_down:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
disk_info = jsonutils.loads(disk_info_text)
# NOTE(dgenin): Migration is not implemented for LVM backed instances.
if CONF.libvirt.images_type == 'lvm' and not booted_from_volume:
reason = _("Migration is not supported for LVM backed instances")
raise exception.InstanceFaultRollback(
exception.MigrationPreCheckError(reason=reason))
# copy disks to destination
        # First rename the instance dir to inst_base + "_resize", to cope
        # with the instance dir living on shared storage (e.g. NFS).
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
shared_storage = self._is_storage_shared_with(dest, inst_base)
        # Try to create the directory on the remote compute node. If this
        # fails we let the exception propagate up the stack so we can catch
        # failures here early.
if not shared_storage:
try:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
except processutils.ProcessExecutionError as e:
reason = _("not able to execute ssh command: %s") % e
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
self.power_off(instance, timeout, retry_interval)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
try:
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
active_flavor = instance.get_flavor()
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if (fname == 'disk.swap' and
active_flavor.get('swap', 0) != flavor.get('swap', 0)):
# To properly resize the swap partition, it must be
# re-created with the proper size. This is acceptable
                    # because when an OS is shut down, the contents of the
                    # swap space are just garbage; the OS doesn't care
                    # what is in it.
# We will not copy over the swap disk here, and rely on
# finish_migration/_create_image to re-create it for us.
continue
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
def _wait_for_running(self, instance):
state = self.get_info(instance).state
if state == power_state.RUNNING:
LOG.info(_LI("Instance running successfully."), instance=instance)
raise loopingcall.LoopingCallDone()
@staticmethod
def _disk_size_from_instance(instance, info):
"""Determines the disk size from instance properties
Returns the disk size by using the disk name to determine whether it
is a root or an ephemeral disk, then by checking properties of the
instance returns the size converted to bytes.
        Returns 0 if the disk name does not match (disk, disk.local).
"""
fname = os.path.basename(info['path'])
if fname == 'disk':
size = instance.root_gb
elif fname == 'disk.local':
size = instance.ephemeral_gb
else:
size = 0
return size * units.Gi
@staticmethod
def _disk_raw_to_qcow2(path):
"""Converts a raw disk to qcow2."""
path_qcow = path + '_qcow'
utils.execute('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, path_qcow)
utils.execute('mv', path_qcow, path)
@staticmethod
def _disk_qcow2_to_raw(path):
"""Converts a qcow2 disk to raw."""
path_raw = path + '_raw'
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, path_raw)
utils.execute('mv', path_raw, path)
def _disk_resize(self, info, size):
"""Attempts to resize a disk to size
Attempts to resize a disk by checking the capabilities and
preparing the format, then calling disk.api.extend.
        Note: currently only extending a disk is supported.
"""
        # If we have a non-partitioned image that we can extend,
        # ensure it is in 'raw' format so we can extend the file system.
fmt, org = [info['type']] * 2
pth = info['path']
if (size and fmt == 'qcow2' and
disk.can_resize_image(pth, size) and
disk.is_image_extendable(pth, use_cow=True)):
self._disk_qcow2_to_raw(pth)
fmt = 'raw'
if size:
use_cow = fmt == 'qcow2'
disk.extend(pth, size, use_cow=use_cow)
if fmt != org:
# back to qcow2 (no backing_file though) so that snapshot
# will be available
self._disk_raw_to_qcow2(pth)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
size = self._disk_size_from_instance(instance, info)
if resize_instance:
self._disk_resize(info, size)
if info['type'] == 'raw' and CONF.use_cow_images:
self._disk_raw_to_qcow2(info['path'])
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
        # assume _create_image does nothing if the target file exists.
self._create_image(context, instance, disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False,
fallback_from_host=migration.source_compute)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
# or not we've migrated to another host, because we unplug VIFs locally
# and the status change in the port might go undetected by the neutron
# L2 agent (or neutron server) so neutron may not know that the VIF was
# unplugged in the first place and never send an event.
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def _cleanup_failed_migration(self, inst_base):
"""Make sure that a failed migrate doesn't prevent us from rolling
back in a revert.
"""
try:
shutil.rmtree(inst_base)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_revert_migration",
instance=instance)
inst_base = libvirt_utils.get_instance_path(instance)
inst_base_resize = inst_base + "_resize"
# NOTE(danms): if we're recovering from a failed migration,
# make sure we don't have a left-over same-host base directory
# that would conflict. Also, don't fail on the rename if the
# failure happened early.
if os.path.exists(inst_base_resize):
self._cleanup_failed_migration(inst_base)
utils.execute('mv', inst_base_resize, inst_base)
image_ref = instance.get('image_ref')
image_meta = compute_utils.get_image_metadata(context,
self._image_api,
image_ref,
instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info)
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
power_on=power_on)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
@staticmethod
def _get_io_devices(xml_doc):
"""get the list of io devices from the xml document."""
result = {"volumes": [], "ifaces": []}
try:
doc = etree.fromstring(xml_doc)
except Exception:
return result
blocks = [('./devices/disk', 'volumes'),
('./devices/interface', 'ifaces')]
for block, key in blocks:
section = doc.findall(block)
for node in section:
for child in node.getchildren():
if child.tag == 'target' and child.get('dev'):
result[key].append(child.get('dev'))
return result
def get_diagnostics(self, instance):
domain = self._host.get_domain(instance)
output = {}
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
for i in range(len(cputime)):
output["cpu" + str(i) + "_time"] = cputime[i][2]
except libvirt.libvirtError:
pass
# get io status
xml = domain.XMLDesc(0)
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
output[guest_disk + "_read_req"] = stats[0]
output[guest_disk + "_read"] = stats[1]
output[guest_disk + "_write_req"] = stats[2]
output[guest_disk + "_write"] = stats[3]
output[guest_disk + "_errors"] = stats[4]
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
output[interface + "_rx"] = stats[0]
output[interface + "_rx_packets"] = stats[1]
output[interface + "_rx_errors"] = stats[2]
output[interface + "_rx_drop"] = stats[3]
output[interface + "_tx"] = stats[4]
output[interface + "_tx_packets"] = stats[5]
output[interface + "_tx_errors"] = stats[6]
output[interface + "_tx_drop"] = stats[7]
except libvirt.libvirtError:
pass
output["memory"] = domain.maxMemory()
# memoryStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
mem = domain.memoryStats()
for key in mem.keys():
output["memory-" + key] = mem[key]
except (libvirt.libvirtError, AttributeError):
pass
return output
def get_instance_diagnostics(self, instance):
domain = self._host.get_domain(instance)
xml = domain.XMLDesc(0)
xml_doc = etree.fromstring(xml)
(state, max_mem, mem, num_cpu, cpu_time) = \
self._host.get_domain_info(domain)
config_drive = configdrive.required_by(instance)
launched_at = timeutils.normalize_time(instance.launched_at)
uptime = timeutils.delta_seconds(launched_at,
timeutils.utcnow())
diags = diagnostics.Diagnostics(state=power_state.STATE_MAP[state],
driver='libvirt',
config_drive=config_drive,
hypervisor_os='linux',
uptime=uptime)
diags.memory_details.maximum = max_mem / units.Mi
diags.memory_details.used = mem / units.Mi
# get cpu time, might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
try:
cputime = domain.vcpus()[0]
num_cpus = len(cputime)
for i in range(num_cpus):
diags.add_cpu(time=cputime[i][2])
except libvirt.libvirtError:
pass
# get io status
dom_io = LibvirtDriver._get_io_devices(xml)
for guest_disk in dom_io["volumes"]:
try:
# blockStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.blockStats(guest_disk)
diags.add_disk(read_bytes=stats[1],
read_requests=stats[0],
write_bytes=stats[3],
write_requests=stats[2])
except libvirt.libvirtError:
pass
for interface in dom_io["ifaces"]:
try:
# interfaceStats might launch an exception if the method
# is not supported by the underlying hypervisor being
# used by libvirt
stats = domain.interfaceStats(interface)
diags.add_nic(rx_octets=stats[0],
rx_errors=stats[2],
rx_drop=stats[3],
rx_packets=stats[1],
tx_octets=stats[4],
tx_errors=stats[6],
tx_drop=stats[7],
tx_packets=stats[5])
except libvirt.libvirtError:
pass
# Update mac addresses of interface if stats have been reported
if diags.nic_details:
nodes = xml_doc.findall('./devices/interface/mac')
for index, node in enumerate(nodes):
diags.nic_details[index].mac_address = node.get('address')
return diags
def instance_on_disk(self, instance):
# ensure directories exist and are writable
instance_path = libvirt_utils.get_instance_path(instance)
LOG.debug('Checking instance files accessibility %s', instance_path)
shared_instance_path = os.access(instance_path, os.W_OK)
# NOTE(flwang): For shared block storage scenario, the file system is
# not really shared by the two hosts, but the volume of evacuated
# instance is reachable.
shared_block_storage = (self.image_backend.backend().
is_shared_block_storage())
return shared_instance_path or shared_block_storage
def inject_network_info(self, instance, nw_info):
self.firewall_driver.setup_basic_filtering(instance, nw_info)
def delete_instance_files(self, instance):
target = libvirt_utils.get_instance_path(instance)
# A resize may be in progress
target_resize = target + '_resize'
        # Other threads may attempt to rename the path, so we rename the
        # path to target + '_del' (because rename is atomic) and iterate
        # twice in the unlikely event that a concurrent rename occurs
        # between the two rename attempts in this method. In general this
        # method should be fairly thread-safe without these additional
        # checks, since other operations involving renames are not
        # permitted when the task state is not None and the task state
        # should be set to something other than None by the time this
        # method is invoked.
target_del = target + '_del'
for i in six.moves.range(2):
try:
utils.execute('mv', target, target_del)
break
except Exception:
pass
try:
utils.execute('mv', target_resize, target_del)
break
except Exception:
pass
# Either the target or target_resize path may still exist if all
# rename attempts failed.
remaining_path = None
for p in (target, target_resize):
if os.path.exists(p):
remaining_path = p
break
# A previous delete attempt may have been interrupted, so target_del
# may exist even if all rename attempts during the present method
# invocation failed due to the absence of both target and
# target_resize.
if not remaining_path and os.path.exists(target_del):
LOG.info(_LI('Deleting instance files %s'), target_del,
instance=instance)
remaining_path = target_del
try:
shutil.rmtree(target_del)
except OSError as e:
LOG.error(_LE('Failed to cleanup directory %(target)s: '
'%(e)s'), {'target': target_del, 'e': e},
instance=instance)
# It is possible that the delete failed, if so don't mark the instance
# as cleaned.
if remaining_path and os.path.exists(remaining_path):
LOG.info(_LI('Deletion of %s failed'), remaining_path,
instance=instance)
return False
LOG.info(_LI('Deletion of %s complete'), target_del, instance=instance)
return True
@property
def need_legacy_block_device_info(self):
return False
def default_root_device_name(self, instance, image_meta, root_bdm):
disk_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "disk")
cdrom_bus = blockinfo.get_disk_bus_for_device_type(
CONF.libvirt.virt_type, image_meta, "cdrom")
root_info = blockinfo.get_root_info(
CONF.libvirt.virt_type, image_meta, root_bdm, disk_bus,
cdrom_bus)
return block_device.prepend_dev(root_info['dev'])
def default_device_names_for_instance(self, instance, root_device_name,
*block_device_lists):
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
ephemerals, swap, block_device_mapping = block_device_lists[:3]
blockinfo.default_device_names(CONF.libvirt.virt_type,
nova_context.get_admin_context(),
instance, root_device_name,
ephemerals, swap,
block_device_mapping,
image_meta)
def is_supported_fs_format(self, fs_type):
return fs_type in [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
def _get_power_state(self, virt_dom):
dom_info = self._host.get_domain_info(virt_dom)
return LIBVIRT_POWER_STATE[dom_info[0]]
| {
"content_hash": "f6c5979897ab5f16e66b786badc0317f",
"timestamp": "",
"source": "github",
"line_count": 6701,
"max_line_length": 79,
"avg_line_length": 44.15460379047903,
"alnum_prop": 0.5408138434500473,
"repo_name": "dawnpower/nova",
"id": "4e94b6033d745987a8ad27999dc7365a23c961f0",
"size": "296807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16032321"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "406262"
}
],
"symlink_target": ""
} |
import RPi.GPIO as GPIO
import time
class DistanceDetector(object):
"""docstring for DistanceDetector"""
def __init__(self, trig, echo, flag):
super(DistanceDetector, self).__init__()
self.TRIG = trig
self.ECHO = echo
        # Echo pulse timestamps; must not shadow the start() method.
        self.start_time = 0
        self.stop_time = 0
self.distance = -1
self.flag = flag
GPIO.setup(self.TRIG, GPIO.OUT)
GPIO.setup(self.ECHO, GPIO.IN)
def start(self):
try:
while True:
GPIO.output(self.TRIG, 0)
time.sleep(0.01)
GPIO.output(self.TRIG, 1)
time.sleep(0.00001)
GPIO.output(self.TRIG, 0)
                self.start_time = time.time()
                while GPIO.input(self.ECHO) == 0:
                    self.start_time = time.time()
                while GPIO.input(self.ECHO) == 1:
                    self.stop_time = time.time()
                # speed of sound is 340 m/s, i.e. 34000 cm/s
                elapsed = self.stop_time - self.start_time
                self.distance = elapsed * 34000 / 2
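                # e.g. an echo round trip of 0.001 s gives
                # (0.001 * 34000) / 2 = 17 cm.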
                print u'%s: obstacle at %s cm' % (
                    self.flag, self.distance)
except Exception as e:
raise e
def tooclose(self):
return self.distance <= 10
| {
"content_hash": "f25d3e24d68b4eb119009404740ee0f3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 68,
"avg_line_length": 27.386363636363637,
"alnum_prop": 0.49377593360995853,
"repo_name": "ArmsZhou/Raspberry-Pi-Python-scripts",
"id": "c7ceddd8896e98d179ab385f4d2792e0d59f697c",
"size": "1273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distancedetector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125420"
}
],
"symlink_target": ""
} |
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
# Examples:
# url(r'^$', 'mysite.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/login/$', 'django.contrib.auth.views.login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', {'next_page': '/'}),
url(r'', include('blog.urls')),
]
| {
"content_hash": "771b3c12630770c451bfae5e41b0cf82",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 87,
"avg_line_length": 32.785714285714285,
"alnum_prop": 0.6405228758169934,
"repo_name": "maciek263/django2",
"id": "e829b74607efcd4b371a0ccc90a8a71f0567d5f8",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1031"
},
{
"name": "CSS",
"bytes": "43229"
},
{
"name": "HTML",
"bytes": "75613"
},
{
"name": "JavaScript",
"bytes": "104857"
},
{
"name": "PowerShell",
"bytes": "1458"
},
{
"name": "Python",
"bytes": "5780142"
},
{
"name": "Tcl",
"bytes": "24447"
}
],
"symlink_target": ""
} |
from lino.api import dd, rt, _
def objects():
ExcerptType = rt.models.excerpts.ExcerptType
kw = dict(
template="default.weasy.html",
primary=True, certifying=True,
build_method='weasy2pdf',
**dd.str2kw('name', _("VAT declaration")))
yield ExcerptType.update_for_model(rt.models.bevats.Declaration, **kw)
| {
"content_hash": "75f6e91dafdd1955b45d8f9f73d54e9e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 74,
"avg_line_length": 31.818181818181817,
"alnum_prop": 0.6485714285714286,
"repo_name": "lino-framework/xl",
"id": "606606ed98dc36afdd482b0e8889bd56e979e4a9",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/bevats/fixtures/std.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1417287"
},
{
"name": "JavaScript",
"bytes": "1630929"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2395471"
}
],
"symlink_target": ""
} |
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Convolution3D, MaxPooling3D, ZeroPadding3D,Conv3D
from keras.optimizers import SGD
'''
dim_ordering issue:
- 'th'-style dim_ordering: [batch, channels, depth, height, width]
- 'tf'-style dim_ordering: [batch, depth, height, width, channels]
'''
NUM_CLASSES = 2
NUM_FRAMES_PER_CLIP = 16
INPUT_IMAGE_SIZE = 112
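# For example, with the 'tf' ordering a batch of 8 clips fed to these models
# has shape (8, NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3),
# i.e. (8, 16, 112, 112, 3); the batch size of 8 is illustrative.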
def get_model(summary=False, backend='tf'):
""" Return the Keras model of the network
"""
model = Sequential()
if backend == 'tf':
input_shape=(NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3) # l, h, w, c
else:
input_shape=(3, NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE) # c, l, h, w
model.add(Convolution3D(64, 3, 3, 3, activation='relu',
border_mode='same', name='conv1',
input_shape=input_shape))
model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
border_mode='valid', name='pool1'))
# 2nd layer group
model.add(Convolution3D(128, 3, 3, 3, activation='relu',
border_mode='same', name='conv2'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool2'))
# 3rd layer group
model.add(Convolution3D(256, 3, 3, 3, activation='relu',
border_mode='same', name='conv3a'))
model.add(Convolution3D(256, 3, 3, 3, activation='relu',
border_mode='same', name='conv3b'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool3'))
# 4th layer group
model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv4a'))
model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv4b'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool4'))
# 5th layer group
model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv5a'))
model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv5b'))
model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad5'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool5'))
model.add(Flatten())
# FC layers group
model.add(Dense(4096, activation='relu', name='fc6'))
model.add(Dropout(.5))
model.add(Dense(4096, activation='relu', name='fc7'))
model.add(Dropout(.5))
model.add(Dense(NUM_CLASSES, activation='softmax', name='fc8'))
if summary:
print(model.summary())
return model
def get_model_3l(dropouts,summary=False, backend='tf'):
""" Return the Keras model of the network
"""
model = Sequential()
if backend == 'tf':
input_shape = (NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3) # l, h, w, c
else:
input_shape = (3, NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE) # c, l, h, w
model.add(Convolution3D(4, 3, 3, 3, activation='relu',
border_mode='same', name='conv1',
input_shape=input_shape))
model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
border_mode='valid', name='pool1'))
model.add(Dropout(dropouts[0]))
# 2nd layer group
model.add(Convolution3D(12, 3, 3, 3, activation='relu',
border_mode='same', name='conv2'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool2'))
model.add(Dropout(dropouts[1]))
# 3rd layer group
model.add(Convolution3D(40, 3, 3, 3, activation='relu',
border_mode='same', name='conv3a'))
model.add(Convolution3D(40, 3, 3, 3, activation='relu',
border_mode='same', name='conv3b'))
model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad3'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool3'))
model.add(Flatten())
# FC layers group
model.add(Dense(128, activation='relu', name='fc6'))
model.add(Dropout(dropouts[2]))
model.add(Dense(128, activation='relu', name='fc7'))
model.add(Dropout(dropouts[3]))
model.add(Dense(NUM_CLASSES, activation='softmax', name='fc8'))
if summary:
print(model.summary())
return model
def get_model_3l_k2API(dropouts,summary=False, backend='tf'):
""" Return the Keras model of the network
"""
model = Sequential()
pads,padv, act = "same","valid","relu"
if backend == 'tf':
input_shape = (NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3) # l, h, w, c
else:
input_shape = (3, NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE) # c, l, h, w
model.add(Conv3D(4, (3, 3, 3), padding=pads,activation=act,input_shape=input_shape,name='conv1'))
model.add(MaxPooling3D(padding=padv,strides=(1, 2, 2),pool_size=(1, 2, 2),name='pool1'))
model.add(Dropout(dropouts[0]))
# 2nd layer group
model.add(Conv3D(12, (3, 3, 3),padding=pads,activation=act,name='conv2'))
model.add(MaxPooling3D(padding=padv,strides=(2, 2, 2),pool_size=(2, 2, 2),
name='pool2'))
model.add(Dropout(dropouts[1]))
# 3rd layer group
model.add(Conv3D(40, (3, 3, 3),padding=pads,activation=act,name='conv3a'))
model.add(Conv3D(40, (3, 3, 3),padding=pads,activation=act,name='conv3b'))
model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad3'))
model.add(MaxPooling3D(padding=padv,strides=(2, 2, 2),pool_size=(2, 2, 2),
name='pool3'))
model.add(Flatten())
# FC layers group
model.add(Dense(128, activation=act, name='fc6'))
model.add(Dropout(dropouts[2]))
model.add(Dense(128, activation=act, name='fc7'))
model.add(Dropout(dropouts[3]))
model.add(Dense(NUM_CLASSES, activation='softmax', name='fc8'))
if summary:
print(model.summary())
return model
def get_model_3l_deep(dropouts,summary=False, backend='tf'):
""" Return the Keras model of the network
"""
model = Sequential()
if backend == 'tf':
input_shape = (NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3) # l, h, w, c
else:
input_shape = (3, NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE) # c, l, h, w
model.add(Convolution3D(64, 3, 3, 3, activation='relu',
border_mode='same', name='conv1',
input_shape=input_shape))
model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
border_mode='valid', name='pool1'))
model.add(Dropout(dropouts[0]))
# 2nd layer group
model.add(Convolution3D(128, 3, 3, 3, activation='relu',
border_mode='same', name='conv2'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool2'))
model.add(Dropout(dropouts[1]))
# 3rd layer group
model.add(Convolution3D(256, 3, 3, 3, activation='relu',
border_mode='same', name='conv3a'))
model.add(Convolution3D(256, 3, 3, 3, activation='relu',
border_mode='same', name='conv3b'))
model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad3'))
model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool3'))
model.add(Flatten())
# FC layers group
model.add(Dense(512, activation='relu', name='fc6'))
model.add(Dropout(dropouts[2]))
model.add(Dense(512, activation='relu', name='fc7'))
model.add(Dropout(dropouts[3]))
model.add(Dense(NUM_CLASSES, activation='softmax', name='fc8'))
if summary:
print(model.summary())
return model
def get_int_model(model, layer, backend='tf'):
if backend == 'tf':
input_shape=(NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE, 3) # l, h, w, c
else:
input_shape=(3, NUM_FRAMES_PER_CLIP, INPUT_IMAGE_SIZE, INPUT_IMAGE_SIZE) # c, l, h, w
int_model = Sequential()
int_model.add(Convolution3D(64, 3, 3, 3, activation='relu',
border_mode='same', name='conv1',
input_shape=input_shape,
weights=model.layers[0].get_weights()))
if layer == 'conv1':
return int_model
int_model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
border_mode='valid', name='pool1'))
if layer == 'pool1':
return int_model
# 2nd layer group
int_model.add(Convolution3D(128, 3, 3, 3, activation='relu',
border_mode='same', name='conv2',
weights=model.layers[2].get_weights()))
if layer == 'conv2':
return int_model
int_model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool2'))
if layer == 'pool2':
return int_model
# 3rd layer group
int_model.add(Convolution3D(256, 3, 3, 3, activation='relu',
border_mode='same', name='conv3a',
weights=model.layers[4].get_weights()))
if layer == 'conv3a':
return int_model
int_model.add(Convolution3D(256, 3, 3, 3, activation='relu',
border_mode='same', name='conv3b',
weights=model.layers[5].get_weights()))
if layer == 'conv3b':
return int_model
int_model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool3'))
if layer == 'pool3':
return int_model
# 4th layer group
int_model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv4a',
weights=model.layers[7].get_weights()))
if layer == 'conv4a':
return int_model
int_model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv4b',
weights=model.layers[8].get_weights()))
if layer == 'conv4b':
return int_model
int_model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool4'))
if layer == 'pool4':
return int_model
# 5th layer group
int_model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv5a',
weights=model.layers[10].get_weights()))
if layer == 'conv5a':
return int_model
int_model.add(Convolution3D(512, 3, 3, 3, activation='relu',
border_mode='same', name='conv5b',
weights=model.layers[11].get_weights()))
if layer == 'conv5b':
return int_model
int_model.add(ZeroPadding3D(padding=(0, 1, 1), name='zeropad'))
int_model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
border_mode='valid', name='pool5'))
if layer == 'pool5':
return int_model
int_model.add(Flatten())
# FC layers group
int_model.add(Dense(4096, activation='relu', name='fc6',
weights=model.layers[15].get_weights()))
if layer == 'fc6':
return int_model
int_model.add(Dropout(.5))
int_model.add(Dense(4096, activation='relu', name='fc7',
weights=model.layers[17].get_weights()))
if layer == 'fc7':
return int_model
int_model.add(Dropout(.5))
int_model.add(Dense(NUM_CLASSES, activation='softmax', name='fc8',
weights=model.layers[19].get_weights()))
if layer == 'fc8':
return int_model
return None
if __name__ == '__main__':
model = get_model(summary=True)
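    # A sketch of how the smaller nets might be built (the dropout values
    # here are illustrative, not from the original training setup):
    # model_3l = get_model_3l(dropouts=[0.25, 0.25, 0.5, 0.5], summary=True)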
| {
"content_hash": "8dd8cb9cf9242d62f6d7f0034de6a502",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 99,
"avg_line_length": 45.6542750929368,
"alnum_prop": 0.5702304372608094,
"repo_name": "bbcdli/xuexi",
"id": "adc0b4b28305ef0ee804f523f9e4c50887f22000",
"size": "12301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vid_ana_k/c3d_keras_model_newer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "862"
},
{
"name": "C++",
"bytes": "30570"
},
{
"name": "HTML",
"bytes": "250"
},
{
"name": "Python",
"bytes": "2911994"
},
{
"name": "Shell",
"bytes": "14763"
}
],
"symlink_target": ""
} |
"""Tests for sonnet.python.modules.util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import tempfile
# Dependency imports
from absl.testing import parameterized
import numpy as np
import sonnet as snt
import sonnet.python.modules.util as util
import tensorflow as tf
_EXPECTED_FORMATTED_VARIABLE_LIST = (
"Variable Shape Type Collections Device\n"
"m1/v1 3x4 float32 global_variables, trainable_variables\n"
"m2/v2 5x6 float32 local_variables "
"/device:GPU:*"
)
_EXPECTED_FORMATTED_VARIABLE_MAP = (
"Key Variable Shape Type Collections "
"Device\n"
"vv1 m1/v1 3x4 float32 global_variables, trainable_variables\n"
"vv2 m2/v2 5x6 float32 local_variables "
"/device:GPU:*"
)
class UtilTest(parameterized.TestCase, tf.test.TestCase):
def testQueryInModule(self):
module = snt.Linear(output_size=42, name="linear")
with self.assertRaisesRegexp(snt.Error, "not instantiated yet"):
module.get_variables()
# Compare to the desired result set, after connection.
input_ = tf.placeholder(tf.float32, shape=[3, 4])
_ = module(input_)
self.assertEqual(set(module.get_variables()),
{module.w, module.b})
self.assertEqual(set(snt.get_variables_in_module(module)),
{module.w, module.b})
def testScopeQuery(self):
with tf.variable_scope("prefix") as s1:
v1 = tf.get_variable("a", shape=[3, 4])
with tf.variable_scope("prefix_with_more_stuff") as s2:
v2 = tf.get_variable("b", shape=[5, 6])
v3 = tf.get_variable("c", shape=[7])
# get_variables_in_scope should add a "/" to only search that scope, not
# any others which share the same prefix.
self.assertEqual(snt.get_variables_in_scope(s1), (v1,))
self.assertEqual(set(snt.get_variables_in_scope(s2)), {v2, v3})
self.assertEqual(snt.get_variables_in_scope(s1.name), (v1,))
self.assertEqual(set(snt.get_variables_in_scope(s2.name)), {v2, v3})
def testIsScopePrefix(self):
self.assertTrue(util._is_scope_prefix("a/b/c", ""))
self.assertTrue(util._is_scope_prefix("a/b/c", "a/b/c"))
self.assertTrue(util._is_scope_prefix("a/b/c", "a/b"))
self.assertTrue(util._is_scope_prefix("a/b/c", "a"))
self.assertTrue(util._is_scope_prefix("a/b/c", "a/"))
self.assertFalse(util._is_scope_prefix("a/b/c", "b"))
self.assertFalse(util._is_scope_prefix("ab/c", "a"))
def testGetNormalizedVariableMapScope(self):
with tf.variable_scope("prefix") as s1:
v1 = tf.get_variable("a", shape=[5, 6])
v2 = tf.get_variable("b", shape=[7])
variable_map = snt.get_normalized_variable_map(s1)
self.assertEqual(len(variable_map), 2)
self.assertIn("a", variable_map)
self.assertIn("b", variable_map)
self.assertIs(variable_map["a"], v1)
self.assertIs(variable_map["b"], v2)
def testGetNormalizedVariableMapScopeContext(self):
with tf.variable_scope("prefix1") as s1:
with tf.variable_scope("prefix2") as s2:
v1 = tf.get_variable("a", shape=[5, 6])
v2 = tf.get_variable("b", shape=[7])
with tf.variable_scope("prefix") as s3:
_ = tf.get_variable("c", shape=[8])
err = r"Scope 'prefix1/prefix2' is not prefixed by 'prefix'."
with self.assertRaisesRegexp(ValueError, err):
variable_map = snt.get_normalized_variable_map(s2, context=s3)
variable_map = snt.get_normalized_variable_map(s2, context=s1)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s1),
variable_map)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s1.name),
variable_map)
self.assertEqual(len(variable_map), 2)
self.assertIn("prefix2/a", variable_map)
self.assertIn("prefix2/b", variable_map)
self.assertIs(variable_map["prefix2/a"], v1)
self.assertIs(variable_map["prefix2/b"], v2)
with tf.variable_scope("") as s4:
self.assertEqual(s4.name, "")
variable_map = snt.get_normalized_variable_map(s2, context=s4)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s4),
variable_map)
self.assertEqual(snt.get_normalized_variable_map(s2.name, context=s4.name),
variable_map)
self.assertEqual(len(variable_map), 2)
self.assertIn("prefix1/prefix2/a", variable_map)
self.assertIn("prefix1/prefix2/b", variable_map)
self.assertIs(variable_map["prefix1/prefix2/a"], v1)
self.assertIs(variable_map["prefix1/prefix2/b"], v2)
def testGetNormalizedVariableMapModule(self):
input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3])
conv = snt.Conv2D(output_channels=3, kernel_shape=3)
conv(input_)
variable_map = snt.get_normalized_variable_map(conv)
self.assertEqual(len(variable_map), 2)
self.assertIn("w", variable_map)
self.assertIn("b", variable_map)
self.assertIs(variable_map["w"], conv.w)
self.assertIs(variable_map["b"], conv.b)
def testGetNormalizedVariableMapWithPartitionedVariable(self):
hidden = tf.ones(shape=(1, 16, 16, 3))
partitioner = tf.variable_axis_size_partitioner(4)
conv = snt.Conv2D(output_channels=3,
kernel_shape=3,
stride=1,
partitioners={"w": partitioner})
conv(hidden)
variable_map = snt.get_normalized_variable_map(conv,
group_sliced_variables=True)
self.assertEqual(len(variable_map), 2)
self.assertEqual(variable_map["b"], conv.b)
self.assertEqual(len(variable_map["w"]), 3)
variable_map = snt.get_normalized_variable_map(conv,
group_sliced_variables=False)
self.assertEqual(variable_map["b"], conv.b)
self.assertEqual(set(variable_map), set(["b", "w/part_0", "w/part_1",
"w/part_2"]))
def testVariableMapItems(self):
hidden = tf.ones(shape=(1, 16, 16, 3))
partitioner = tf.variable_axis_size_partitioner(4)
conv = snt.Conv2D(output_channels=3,
kernel_shape=3,
stride=1,
partitioners={"w": partitioner})
conv(hidden)
variable_map = snt.get_normalized_variable_map(conv)
items = snt.variable_map_items(variable_map)
items_str = sorted((key, var.op.name) for key, var in items)
self.assertEqual(
items_str,
[(u"b", u"conv_2d/b"), ("w", u"conv_2d/w/part_0"),
("w", u"conv_2d/w/part_1"), ("w", u"conv_2d/w/part_2")])
def testGetSaverScope(self):
with tf.variable_scope("prefix") as s1:
tf.get_variable("a", shape=[5, 6])
tf.get_variable("b", shape=[7])
saver = snt.get_saver(s1)
self.assertIsInstance(saver, tf.train.Saver)
self.assertEqual(set(saver._var_list), set(["a", "b"]))
def testGetSaverModule(self):
input_ = tf.placeholder(tf.float32, shape=[1, 10, 10, 3])
conv = snt.Conv2D(output_channels=3, kernel_shape=3)
conv(input_)
saver = snt.get_saver(conv)
self.assertIsInstance(saver, tf.train.Saver)
self.assertIn("w", saver._var_list)
self.assertIn("b", saver._var_list)
def _create_conv(self, partitioned, name):
hidden = tf.ones(shape=(1, 16, 16, 3))
if partitioned:
partitioners = {"w": tf.variable_axis_size_partitioner(4)}
else:
partitioners = None
conv = snt.Conv2D(output_channels=3, kernel_shape=3, stride=1,
partitioners=partitioners, name=name)
conv(hidden)
return conv
@parameterized.parameters(
{"save_partitioned": True, "load_partitioned": True},
{"save_partitioned": True, "load_partitioned": False},
{"save_partitioned": False, "load_partitioned": True},
{"save_partitioned": False, "load_partitioned": False})
def testGetSaverPartitioned(self, save_partitioned, load_partitioned):
path = os.path.join(tempfile.mkdtemp(), "ckpt")
# Save checkpoint.
with self.test_session() as sess:
conv = self._create_conv(partitioned=save_partitioned, name="a")
saver = snt.get_saver(conv)
sess.run(tf.global_variables_initializer())
saver.save(sess, path)
w = tf.identity(conv.w)
w_value = sess.run(w)
# Restore checkpoint.
with self.test_session() as sess:
conv = self._create_conv(partitioned=load_partitioned, name="b")
saver = snt.get_saver(conv)
saver.restore(sess, path)
w = tf.identity(conv.w)
self.assertAllEqual(sess.run(w), w_value)
def testCollectionGetVariableInScope(self):
with tf.variable_scope("prefix") as s1:
tf.get_variable("a", shape=[1], collections=["test"], trainable=False)
self.assertEqual(len(snt.get_variables_in_scope(s1)), 0)
self.assertEqual(len(snt.get_variables_in_scope(s1, collection="test2")), 0)
self.assertEqual(len(snt.get_variables_in_scope(s1, collection="test")), 1)
def testCollectionGetSaver(self):
with tf.variable_scope("prefix") as s1:
input_ = tf.placeholder(tf.float32, shape=[3, 4])
net = snt.Linear(10)(input_)
net = snt.BatchNorm()(net, is_training=True)
saver1 = snt.get_saver(s1)
saver2 = snt.get_saver(s1, collections=(tf.GraphKeys.TRAINABLE_VARIABLES,))
self.assertIsInstance(saver1, tf.train.Saver)
self.assertIsInstance(saver2, tf.train.Saver)
self.assertEqual(len(saver1._var_list), 5)
self.assertIn("linear/w", saver1._var_list)
self.assertIn("linear/b", saver1._var_list)
self.assertIn("batch_norm/beta", saver1._var_list)
self.assertIn("batch_norm/moving_mean", saver1._var_list)
self.assertIn("batch_norm/moving_variance", saver1._var_list)
self.assertEqual(len(saver2._var_list), 3)
self.assertIn("linear/w", saver2._var_list)
self.assertIn("linear/b", saver2._var_list)
self.assertIn("batch_norm/beta", saver2._var_list)
self.assertNotIn("batch_norm/moving_mean", saver2._var_list)
self.assertNotIn("batch_norm/moving_variance", saver2._var_list)
def testCheckInitializers(self):
initializers = {"key_a": tf.truncated_normal_initializer(mean=0,
stddev=1),
"key_c": tf.truncated_normal_initializer(mean=0,
stddev=1)}
keys = ["key_a", "key_b"]
self.assertRaisesRegexp(KeyError,
"Invalid initializer keys.*",
snt.check_initializers,
initializers=initializers,
keys=keys)
del initializers["key_c"]
initializers["key_b"] = "not a function"
self.assertRaisesRegexp(TypeError,
"Initializer for.*",
snt.check_initializers,
initializers=initializers,
keys=keys)
initializers["key_b"] = {"key_c": "not a function"}
self.assertRaisesRegexp(TypeError,
"Initializer for.*",
snt.check_initializers,
initializers=initializers,
keys=keys)
initializers["key_b"] = {"key_c": tf.truncated_normal_initializer(mean=0,
stddev=1),
"key_d": tf.truncated_normal_initializer(mean=0,
stddev=1)}
snt.check_initializers(initializers=initializers, keys=keys)
def testCheckPartitioners(self):
partitioners = {"key_a": tf.variable_axis_size_partitioner(10),
"key_c": tf.variable_axis_size_partitioner(10)}
keys = ["key_a", "key_b"]
self.assertRaisesRegexp(KeyError,
"Invalid partitioner keys.*",
snt.check_partitioners,
partitioners=partitioners,
keys=keys)
del partitioners["key_c"]
partitioners["key_b"] = "not a function"
self.assertRaisesRegexp(TypeError,
"Partitioner for.*",
snt.check_partitioners,
partitioners=partitioners,
keys=keys)
partitioners["key_b"] = {"key_c": "not a function"}
self.assertRaisesRegexp(TypeError,
"Partitioner for.*",
snt.check_partitioners,
partitioners=partitioners,
keys=keys)
partitioners["key_b"] = {"key_c": tf.variable_axis_size_partitioner(10),
"key_d": tf.variable_axis_size_partitioner(10)}
snt.check_partitioners(partitioners=partitioners, keys=keys)
def testCheckRegularizers(self):
regularizers = {"key_a": tf.contrib.layers.l1_regularizer(scale=0.5),
"key_c": tf.contrib.layers.l2_regularizer(scale=0.5)}
keys = ["key_a", "key_b"]
self.assertRaisesRegexp(KeyError,
"Invalid regularizer keys.*",
snt.check_regularizers,
regularizers=regularizers,
keys=keys)
del regularizers["key_c"]
regularizers["key_b"] = "not a function"
self.assertRaisesRegexp(TypeError,
"Regularizer for.*",
snt.check_regularizers,
regularizers=regularizers,
keys=keys)
regularizers["key_b"] = {"key_c": "not a function"}
self.assertRaisesRegexp(TypeError,
"Regularizer for.*",
snt.check_regularizers,
regularizers=regularizers,
keys=keys)
regularizers["key_b"] = {
"key_c": tf.contrib.layers.l1_regularizer(scale=0.5),
"key_d": tf.contrib.layers.l2_regularizer(scale=0.5)}
snt.check_regularizers(regularizers=regularizers, keys=keys)
def testInvalidDicts(self):
batch_size = 3
# Mistake seen in the wild - https://github.com/deepmind/sonnet/issues/74
# Should actually be {'hidden_to_hidden': {'w': some_initializers(), ...}}
initializers = {"hidden_to_hidden": tf.truncated_normal_initializer(0, 1)}
vanilla_rnn = snt.VanillaRNN(hidden_size=23, initializers=initializers)
with self.assertRaisesRegexp(TypeError, "Expected a dict"):
vanilla_rnn(tf.zeros([batch_size, 4], dtype=tf.float32),
vanilla_rnn.zero_state(batch_size, dtype=tf.float32))
# Error: should be a dict mapping strings to partitioners/regularizers.
partitioners = tf.fixed_size_partitioner(num_shards=16)
with self.assertRaisesRegexp(TypeError, "Expected a dict"):
snt.LSTM(hidden_size=42, partitioners=partitioners)
regularizers = tf.contrib.layers.l1_regularizer(scale=0.5)
with self.assertRaisesRegexp(TypeError, "Expected a dict"):
snt.GRU(hidden_size=108, regularizers=regularizers)
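# For reference, a sketch of the correctly nested form for the VanillaRNN case
# above (based on the comment near the top of this test; the stddev value is
# arbitrary):
#
#   initializers = {"hidden_to_hidden": {"w": tf.truncated_normal_initializer(0, 1)}}
#   snt.VanillaRNN(hidden_size=23, initializers=initializers)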
def testHasVariableScope(self):
self.assertFalse(snt.has_variable_scope("string"))
linear = snt.Linear(10)
self.assertTrue(snt.has_variable_scope(linear))
linear(tf.ones((10, 10)))
self.assertTrue(snt.has_variable_scope(linear))
def testFormatVariables(self):
with tf.variable_scope("m1"):
v1 = tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
v2 = tf.get_local_variable("v2", shape=[5, 6])
self.assertEqual(snt.format_variables([v2, v1]),
_EXPECTED_FORMATTED_VARIABLE_LIST)
def testFormatVariableMap(self):
with tf.variable_scope("m1"):
v1 = tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
v2 = tf.get_local_variable("v2", shape=[5, 6])
var_map = {"vv1": v1, "vv2": v2}
self.assertEqual(snt.format_variable_map(var_map),
_EXPECTED_FORMATTED_VARIABLE_MAP)
def testLogVariables(self):
tf.get_default_graph().add_to_collection("config", {"version": 1})
with tf.variable_scope("m1"):
tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
tf.get_local_variable("v2", shape=[5, 6])
snt.log_variables()
def testLogVariables_with_arg(self):
tf.get_default_graph().add_to_collection("config", {"version": 1})
with tf.variable_scope("m1"):
v1 = tf.get_variable("v1", shape=[3, 4])
with tf.device("/gpu"):
with tf.variable_scope("m2"):
v2 = tf.get_local_variable("v2", shape=[5, 6])
snt.log_variables([v2, v1])
class ReuseVarsTest(tf.test.TestCase):
class VariableContainer(object):
def __init__(self, name):
with tf.variable_scope(name) as vs:
self.variable_scope = vs
@util.reuse_variables
def method_with_reuse(self):
return tf.get_variable("a", shape=[1])
def method_without_reuse(self):
return tf.get_variable("b", shape=[1])
class InheritedVariableContainer(VariableContainer):
@util.reuse_variables
def not_inherited_method_with_reuse(self):
return tf.get_variable("c", shape=[1])
def test_reuse_method(self):
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope2")
self.assertEqual("b", obj1.method_without_reuse().op.name)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj1.method_without_reuse)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj2.method_without_reuse)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
def test_multiple_objects_per_variable_scope(self):
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope1")
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
def test_reuse_inherited_method(self):
obj1 = ReuseVarsTest.InheritedVariableContainer("scope1")
obj2 = ReuseVarsTest.InheritedVariableContainer("scope2")
self.assertEqual("b", obj1.method_without_reuse().op.name)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj1.method_without_reuse)
self.assertRaisesRegexp(ValueError,
r"Variable b already exists, disallowed.*",
obj2.method_without_reuse)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/c", obj1.not_inherited_method_with_reuse().op.name)
self.assertEqual("scope1/c", obj1.not_inherited_method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope2/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope2/c", obj2.not_inherited_method_with_reuse().op.name)
self.assertEqual("scope2/c", obj2.not_inherited_method_with_reuse().op.name)
class ModuleReuse(snt.AbstractModule):
def __init__(self, shape, name="multi_template_test"):
super(ReuseVarsTest.ModuleReuse, self).__init__(name=name)
self._shape = shape
@util.reuse_variables
def a(self):
return tf.get_variable("a", shape=self._shape)
@util.reuse_variables
def add_b(self, inputs):
return inputs + tf.get_variable("b", shape=self._shape)
def _build(self, inputs):
return self.add_b(inputs + self.a())
def test_reuse_abstract_module(self):
np.random.seed(100)
batch_size = 3
in_size = 4
inputs = tf.placeholder(tf.float32, shape=[batch_size, in_size])
module1 = ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list())
module2 = ReuseVarsTest.ModuleReuse(inputs.get_shape().as_list())
a1 = module1.a()
inputs_plus_b1 = module1.add_b(inputs)
inputs_plus_ab1 = module1(inputs) # pylint: disable=not-callable
inputs_plus_ab2 = module2(inputs) # pylint: disable=not-callable
inputs_plus_b2 = module2.add_b(inputs)
a2 = module2.a()
inputs_plus_ab1_again = module1(inputs) # pylint: disable=not-callable
inputs_plus_ab2_again = module2(inputs) # pylint: disable=not-callable
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
input_data = np.random.rand(batch_size, in_size)
out = sess.run([a1, inputs_plus_b1, inputs_plus_ab1, a2, inputs_plus_b2,
inputs_plus_ab2],
feed_dict={inputs: input_data})
self.assertNotAlmostEqual(np.linalg.norm(out[0] - out[3]), 0)
self.assertNotAlmostEqual(np.linalg.norm(out[1] - out[4]), 0)
self.assertNotAlmostEqual(np.linalg.norm(out[2] - out[5]), 0)
self.assertAllClose(out[0] + out[1], out[2])
self.assertAllClose(out[3] + out[4], out[5])
out = sess.run([inputs_plus_ab1, inputs_plus_ab1_again],
feed_dict={inputs: input_data})
self.assertAllEqual(out[0], out[1])
out = sess.run([inputs_plus_ab2, inputs_plus_ab2_again],
feed_dict={inputs: input_data})
self.assertAllEqual(out[0], out[1])
def test_variable_scope_call_order(self):
class TestModule(snt.AbstractModule):
def __init__(self, name="test_module"):
super(TestModule, self).__init__(name=name)
@util.reuse_variables
def a(self):
return self.scope_name
def _build(self):
pass
@property
def variable_scope(self):
# Needed to access `self.variable_scope` before calling `self.build()`.
return self._template.variable_scope
m1 = TestModule(name="m1")
m2 = TestModule(name="m2")
a1 = m1.a
a2 = m2.a
self.assertEqual("m1", a1())
self.assertEqual("m2", a2())
def test_multiple_graphs(self):
g1 = tf.Graph()
g2 = tf.Graph()
with g1.as_default():
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope1")
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
with g2.as_default():
obj1 = ReuseVarsTest.VariableContainer("scope1")
obj2 = ReuseVarsTest.VariableContainer("scope1")
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj1.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
self.assertEqual("scope1/a", obj2.method_with_reuse().op.name)
def test_name_scopes(self):
class VariableContainerWithOps(ReuseVarsTest.VariableContainer):
@util.reuse_variables
def add_b(self, tensor):
b = tf.get_variable("b", shape=[1])
return tensor + b
@util.reuse_variables
def add_a(self, tensor):
return tensor + self.method_with_reuse()
@util.reuse_variables
def nested_add(self, tensor):
return tf.ones(shape=[1]) + self.add_a(tensor)
def get_tensor_names_from_default_graph():
ops = [
op for op in tf.get_default_graph().get_operations()
if "Initializer" not in op.name and "Assign" not in op.name and
"read" not in op.name
]
tensor_names = []
for op in ops:
tensor_names.extend(tensor.name for tensor in op.outputs)
return tensor_names
obj1 = VariableContainerWithOps("scope1")
obj2 = VariableContainerWithOps("scope2")
zeros = tf.zeros(shape=[1])
self.assertEqual("scope1/add_b/add", obj1.add_b(zeros).op.name)
self.assertEqual("scope1/add_b_1/add", obj1.add_b(zeros).op.name)
self.assertEqual("scope1/add_a/add", obj1.add_a(zeros).op.name)
self.assertEqual("scope1/add_a_1/add", obj1.add_a(zeros).op.name)
self.assertEqual("scope1/nested_add/add", obj1.nested_add(zeros).op.name)
self.assertEqual("scope1/nested_add_1/add", obj1.nested_add(zeros).op.name)
ones = tf.ones(shape=[1])
self.assertEqual("scope2/add_b/add", obj2.add_b(ones).op.name)
self.assertEqual("scope2/add_b_1/add", obj2.add_b(ones).op.name)
self.assertEqual("scope2/add_a/add", obj2.add_a(ones).op.name)
self.assertEqual("scope2/add_a_1/add", obj2.add_a(ones).op.name)
self.assertEqual("scope2/nested_add/add", obj2.nested_add(ones).op.name)
self.assertEqual("scope2/nested_add_1/add", obj2.nested_add(ones).op.name)
observed_tensor_names = get_tensor_names_from_default_graph()
# Keep this for compatibility with versions of tensorflow lower than 1.6
if len(observed_tensor_names) == 38:
expected_tensor_names = [
u"zeros/shape_as_tensor:0",
u"zeros/Const:0",
u"zeros:0",
u"scope1/b:0",
u"scope1/add_b/add:0",
u"scope1/add_b_1/add:0",
u"scope1/a:0",
u"scope1/add_a/add:0",
u"scope1/add_a_1/add:0",
u"scope1/nested_add/ones/shape_as_tensor:0",
u"scope1/nested_add/ones/Const:0",
u"scope1/nested_add/ones:0",
u"scope1/add_a_2/add:0",
u"scope1/nested_add/add:0",
u"scope1/nested_add_1/ones/shape_as_tensor:0",
u"scope1/nested_add_1/ones/Const:0",
u"scope1/nested_add_1/ones:0",
u"scope1/add_a_3/add:0",
u"scope1/nested_add_1/add:0",
u"ones/shape_as_tensor:0",
u"ones/Const:0",
u"ones:0",
u"scope2/b:0",
u"scope2/add_b/add:0",
u"scope2/add_b_1/add:0",
u"scope2/a:0",
u"scope2/add_a/add:0",
u"scope2/add_a_1/add:0",
u"scope2/nested_add/ones/shape_as_tensor:0",
u"scope2/nested_add/ones/Const:0",
u"scope2/nested_add/ones:0",
u"scope2/add_a_2/add:0",
u"scope2/nested_add/add:0",
u"scope2/nested_add_1/ones/shape_as_tensor:0",
u"scope2/nested_add_1/ones/Const:0",
u"scope2/nested_add_1/ones:0",
u"scope2/add_a_3/add:0",
u"scope2/nested_add_1/add:0"
]
else:
expected_tensor_names = [
"zeros:0",
"scope1/b:0",
"scope1/add_b/add:0",
"scope1/add_b_1/add:0",
"scope1/a:0",
"scope1/add_a/add:0",
"scope1/add_a_1/add:0",
"scope1/nested_add/ones:0",
"scope1/add_a_2/add:0",
"scope1/nested_add/add:0",
"scope1/nested_add_1/ones:0",
"scope1/add_a_3/add:0",
"scope1/nested_add_1/add:0",
"ones:0",
"scope2/b:0",
"scope2/add_b/add:0",
"scope2/add_b_1/add:0",
"scope2/a:0",
"scope2/add_a/add:0",
"scope2/add_a_1/add:0",
"scope2/nested_add/ones:0",
"scope2/add_a_2/add:0",
"scope2/nested_add/add:0",
"scope2/nested_add_1/ones:0",
"scope2/add_a_3/add:0",
"scope2/nested_add_1/add:0",
]
self.assertEqual(expected_tensor_names, observed_tensor_names)
def test_reuse_vars_subgraph_recording(self):
obj1 = ReuseVarsTest.ModuleReuse(shape=[3, 4], name="scope1")
self.assertFalse(obj1.is_connected)
obj1_a_outputs = obj1.a()
self.assertTrue(obj1.is_connected)
self.assertEqual(obj1.last_connected_subgraph.name_scope, "scope1/a/")
self.assertIs(obj1.last_connected_subgraph.module, obj1)
self.assertEqual(obj1.last_connected_subgraph.inputs, {})
self.assertIs(obj1.last_connected_subgraph.outputs, obj1_a_outputs)
class NameFunctionTest(tf.test.TestCase):
def testToSnakeCase(self):
test_cases = [
("UpperCamelCase", "upper_camel_case"),
("lowerCamelCase", "lower_camel_case"),
("endsWithXYZ", "ends_with_xyz"),
("already_snake_case", "already_snake_case"),
("__private__", "private"),
("LSTMModule", "lstm_module"),
("version123p56vfxObject", "version_123p56vfx_object"),
("version123P56VFXObject", "version_123p56vfx_object"),
("versionVFX123P56Object", "version_vfx123p56_object"),
("versionVfx123P56Object", "version_vfx_123p56_object"),
("lstm1", "lstm_1"),
("LSTM1", "lstm1"),
]
for camel_case, snake_case in test_cases:
actual = util.to_snake_case(camel_case)
self.assertEqual(actual, snake_case, "to_snake_case(%s) -> %s != %s" %
(camel_case, actual, snake_case))
def testNameForCallable_Function(self):
def test():
pass
self.assertName(test, "test")
def testNameForCallable_Lambda(self):
test = lambda x: x
self.assertName(test, None)
def testNameForCallable_Partial(self):
def test(*unused_args):
pass
test = functools.partial(functools.partial(test, "a"), "b")
self.assertName(test, "test")
def testNameForCallable_Instance(self):
class Test(object):
def __call__(self):
pass
self.assertName(Test(), None)
def assertName(self, func, expected):
name = util.name_for_callable(func)
self.assertEqual(name, expected)
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "701f8fa384d43cfb543662c8e635bf49",
"timestamp": "",
"source": "github",
"line_count": 789,
"max_line_length": 80,
"avg_line_length": 38.26869455006337,
"alnum_prop": 0.6092932370669669,
"repo_name": "rakshit-agrawal/sonnet",
"id": "d6b19a781b7f597bb9beca805f4969c883c3c2be",
"size": "30878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sonnet/python/modules/util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76901"
},
{
"name": "Python",
"bytes": "1258835"
},
{
"name": "Shell",
"bytes": "1724"
}
],
"symlink_target": ""
} |
print('Adrienne') | {
"content_hash": "77fb6c38cd3fbff864ba9e2c4e2579a6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 17,
"avg_line_length": 17,
"alnum_prop": 0.7647058823529411,
"repo_name": "anm4vp/cs3240-labdemo",
"id": "c176b769f3f289a1e8493debdff372b2036fe519",
"size": "17",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Adrienne.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147"
}
],
"symlink_target": ""
} |
import os
import sys
import django
from django.conf import settings
DEFAULT_SETTINGS = dict(
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"pinax.testimonials",
"pinax.testimonials.tests"
],
MIDDLEWARE_CLASSES=[],
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
},
SITE_ID=1,
ROOT_URLCONF="pinax.testimonials.tests.urls",
SECRET_KEY="notasecret",
)
def run(*args):
if not settings.configured:
settings.configure(**DEFAULT_SETTINGS)
django.setup()
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
django.core.management.call_command(
"makemigrations",
"testimonials",
*args
)
if __name__ == "__main__":
run(*sys.argv[1:])
| {
"content_hash": "b2b53afc137f69b6d19fcebc9bc5c65d",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 55,
"avg_line_length": 19.617021276595743,
"alnum_prop": 0.5824295010845987,
"repo_name": "pinax/pinax-testimonials",
"id": "bf8a220e96bf1fbde432a3b7a63eb9dbfc522ae6",
"size": "944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "makemigrations.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "120"
},
{
"name": "Python",
"bytes": "9930"
}
],
"symlink_target": ""
} |
import re
import sys
def parse_line(line,regex):
''' Each line looks like: /plates/9/81 Dataset {22/Inf, 916} '''
match = regex.match(line.strip())
if match:
return [int(duder) for duder in match.groups()]
else:
return "barf"
firstfile = open(sys.argv[1],'r')
secondfile = open(sys.argv[2],'r')
regex = re.compile(r'\/plates\/([\d]{1,2})\/([\d]+) \s+ Dataset \{([\d]+)[\/]?[\w]*, 916\}', flags=0)
first_plates_dict = {}
second_plates_dict = {}
for i in xrange(1,15):
first_plates_dict[i] = {}
second_plates_dict[i] = {}
print "building first dict..."
for line in firstfile:
line = line.strip()
zug = parse_line(line,regex)
if zug != "barf":
plate,well,count = zug
first_plates_dict[plate][well] = count
print "done!"
firstfile.close()
print "building second dict..."
for line in secondfile:
line = line.strip()
zug = parse_line(line,regex)
if zug != "barf":
plate,well,count = zug
second_plates_dict[plate][well] = count
print "done!"
secondfile.close()
print "comparing both dicts..."
diff = set(first_plates_dict.keys()) - set(second_plates_dict.keys())
print "set difference between first, second: ", diff
count_errors = 0
well_errors = 0
for p in first_plates_dict.keys():
for w in first_plates_dict[p].keys():
try:
first_count = first_plates_dict[p][w]
second_count = second_plates_dict[p][w]
diff = first_count - second_count
#print "difference in first, second file counts for plate %d, well %d : %d" % (p,w,diff)
if diff < 0:
print "Count error: cannot have more cells post filtration"
count_errors += 1
except KeyError:
print "Well error: second file does not have an entry for plate %d, well %w" % (p,w)
well_errors += 1
if count_errors == 0 and well_errors == 0:
print "No errors at this resolution of testing."
else:
print "Found %d count errors, %d well errors. You should check into this." % (count_errors, well_errors) | {
"content_hash": "0820d440b9a5fd5c0388750afa5474dd",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 110,
"avg_line_length": 31.761194029850746,
"alnum_prop": 0.5916353383458647,
"repo_name": "lzamparo/SdA_reduce",
"id": "f849a95456bf538fbc90af96d52128de2f53e184",
"size": "2238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/tests/test_filter_h5.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "548284"
},
{
"name": "R",
"bytes": "27371"
},
{
"name": "Shell",
"bytes": "36047"
}
],
"symlink_target": ""
} |
from django.core.mail import get_connection
from django.core.mail.message import EmailMultiAlternatives
def fix_garbled_mail():
""" 8bit seems to cause buggy emails in Hebrew. revert back to base64"""
# In django 1.5, this prevents BASE64:
from django.core.mail import message
# let's undo it:
from email import Charset
Charset.add_charset('utf-8', Charset.SHORTEST, Charset.BASE64, 'utf-8')
# utf8_charset.body_encoding = Charset.BASE64 # Django 1.6
def send_mails(from_email, emails, subject, message, html_message=None,
fail_silently=False, connection=None):
connection = connection or get_connection(fail_silently=fail_silently)
alts = [(html_message, 'text/html')] if html_message else None
messages = [EmailMultiAlternatives(subject, message, from_email, [email],
alternatives=alts,
connection=connection) for email in
emails]
return connection.send_messages(messages)
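# A minimal usage sketch (hypothetical addresses and subject, not part of this
# module): call fix_garbled_mail() once at startup so Hebrew text is sent
# BASE64-encoded, then fan a message out so each recipient gets an individual
# email:
#
#   fix_garbled_mail()
#   send_mails(u'noreply@example.com',
#              [u'a@example.com', u'b@example.com'],
#              u'Subject', u'Plain-text body',
#              html_message=u'<p>HTML body</p>')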
| {
"content_hash": "0676e3fe128b43012ca88d39a5717005",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 77,
"avg_line_length": 38.44444444444444,
"alnum_prop": 0.6589595375722543,
"repo_name": "hasadna/OpenCommunity",
"id": "3b042007027990e533214f402a5d0155c899bb6b",
"size": "1039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/oc_util/email_util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "596925"
},
{
"name": "HTML",
"bytes": "235437"
},
{
"name": "JavaScript",
"bytes": "2238750"
},
{
"name": "Python",
"bytes": "1264867"
},
{
"name": "Shell",
"bytes": "699"
}
],
"symlink_target": ""
} |
from AccessControl import ClassSecurityInfo
from DateTime import DateTime
from Products.ATContentTypes.content import schemata
from Products.ATExtensions.ateapi import RecordsField
from Products.Archetypes import atapi
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import safe_unicode
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.widgets import ScheduleInputWidget
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
schema = BikaSchema.copy() + Schema((
ReferenceField('Instrument',
allowed_types=('Instrument',),
relationship='InstrumentScheduledTaskInstrument',
widget=StringWidget(
visible=False,
)
),
ComputedField('InstrumentUID',
expression = 'context.getInstrument() and context.getInstrument().UID() or None',
widget=ComputedWidget(
visible=False,
),
),
StringField('Type',
vocabulary = "getTaskTypes",
widget = ReferenceWidget(
checkbox_bound = 0,
label = _("Task type",
"Type"),
),
),
RecordsField('ScheduleCriteria',
required=1,
type='schedulecriteria',
widget=ScheduleInputWidget(
label=_("Criteria"),
),
),
TextField('Considerations',
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
label=_("Considerations"),
description=_("Remarks to take into account before performing the task"),
),
),
))
IdField = schema['id']
schema['description'].required = False
schema['description'].widget.visible = True
schema['description'].schemata = 'default'
schema.moveField('description', before='Considerations')
# Title does not need to be unique
schema['title'].validators = ()
schema['title']._validationLayer()
class InstrumentScheduledTask(BaseFolder):
security = ClassSecurityInfo()
schema = schema
displayContentsTab = False
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def getTaskTypes(self):
""" Return the current list of task types
"""
types = [('Calibration', safe_unicode(_('Calibration')).encode('utf-8')),
('Enhancement', safe_unicode(_('Enhancement')).encode('utf-8')),
('Preventive',safe_unicode(_('Preventive')).encode('utf-8')),
('Repair', safe_unicode(_('Repair')).encode('utf-8')),
('Validation', safe_unicode(_('Validation')).encode('utf-8'))]
return DisplayList(types)
def getCriteria(self):
criteria = "";
criterias = self.getScheduleCriteria()
if criterias and len(criterias) > 0:
crit = criterias[0]
if crit['fromenabled'] == True and crit['fromdate']:
criteria += _('From') + " " + crit['fromdate'] + " "
if crit['repeatenabled'] == True and crit['repeatunit'] and crit['repeatperiod']:
criteria += _("repeating every") + " " + crit['repeatunit'] + " " + _(crit['repeatperiod']) + " "
if crit['repeatuntilenabled'] == True and crit['repeatuntil']:
criteria += _("until") + " " + crit['repeatuntil']
return criteria
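# A sketch of the record shape getCriteria() expects in ScheduleCriteria
# (field names taken from the code above; the values are hypothetical):
#
#   {"fromenabled": True, "fromdate": "2015-06-01",
#    "repeatenabled": True, "repeatunit": "2", "repeatperiod": "weeks",
#    "repeatuntilenabled": False, "repeatuntil": ""}
#
# which renders roughly as "From 2015-06-01 repeating every 2 weeks ".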
atapi.registerType(InstrumentScheduledTask, PROJECTNAME)
| {
"content_hash": "0b80f95592114bef140064c0b3e10c16",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 113,
"avg_line_length": 35.41904761904762,
"alnum_prop": 0.6351169669265931,
"repo_name": "hocinebendou/bika.gsoc",
"id": "d54c1489c4081beb92192ecaeaa868c16b87768d",
"size": "3719",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bika/lims/content/instrumentscheduledtask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "403"
},
{
"name": "COBOL",
"bytes": "5987"
},
{
"name": "CSS",
"bytes": "29758"
},
{
"name": "JavaScript",
"bytes": "411425"
},
{
"name": "Python",
"bytes": "4330980"
},
{
"name": "RobotFramework",
"bytes": "239735"
},
{
"name": "Shell",
"bytes": "11201"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="choroplethmapbox.colorbar.title.font",
**kwargs,
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
| {
"content_hash": "734cae63b596aed1388acc379178d087",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 28.9375,
"alnum_prop": 0.5853131749460043,
"repo_name": "plotly/plotly.py",
"id": "5eed83b481038958347ce3bf1b50921ae8839e90",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/choroplethmapbox/colorbar/title/font/_color.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import thread
from Classes.ConfigParser import ConfigParser
# Import plugins.
# In the future we should house these in a file of their own;
# if we dynamically loaded plugins from the directory,
# we wouldn't need to import and add everything by hand.
from Plugins.plugin import Plugin
from slideShow import SlideShowPlugin
from socketClient import IOPlugin
from gtkDisplay import GTKPlugin
def getAdditionalPlugins(runtimeVars):
"""
Gets any User-Defined Plugins specified in the Configuration
from ./Plugins
@param runtimeVars: User-Defined Configuration
@type runtimeVars: Dictionary
@return: User-Define Plugins
@rtype: Array
"""
plugins = []
for plugin in runtimeVars["plugins"]:
try:
exec "from Plugins." + plugin + " import " + plugin
instance = eval(plugin + "()")
if isinstance(instance, Plugin):
print instance
plugins.append(instance)
else:
print "Huh? what did i get? : " + str(instance)
except Exception, e:
print "Couldn't create an instance of a plugin in the config"
print str(e)
return plugins
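# A rough sketch of the configuration this relies on (hypothetical plugin
# names; the real list comes from ConfigParser.readConfig()):
#
#   runtimeVars = {"plugins": ["WeatherPlugin", "ClockPlugin"], ...}
#   extra = getAdditionalPlugins(runtimeVars)
#
# Each entry must name a module in ./Plugins defining a class of the same name
# that subclasses Plugin; anything else is skipped with a warning.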
def main():
"""
The main function of the client
@return: None
@rtype: None
"""
runtimeVars = ConfigParser.readConfig()
plugins = [SlideShowPlugin(), IOPlugin()] + getAdditionalPlugins(runtimeVars)
runtimeVars["plugins"] += ["SlideShowPlugin", "IOPlugin"]
def addPluginToDict(dict, p):
dict[p.getName()] = p.addMessage
return dict
# messageDict maps each plugin's name to its message-handling function
messageDict = reduce(addPluginToDict, plugins, {})
for plugin in plugins:
plugin.setup(messageDict, runtimeVars)
for plugin in plugins:
print "Starting " + plugin.getName()
if plugin.needsThread():
thread.start_new_thread(plugin.run, (runtimeVars,))
# Instead of having main() be in a busy wait doing nothing,
# we denote GTKPlugin() to be the "main" plugin, so its
# behavior drives any looping done on the main thread
GTKPlugin().run(runtimeVars)
# TODO: Replace with argparse library
if __name__ == "__main__":
main()
| {
"content_hash": "461be5d26ec0c510130cf36132218397",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 81,
"avg_line_length": 30.63013698630137,
"alnum_prop": 0.6547406082289803,
"repo_name": "crew/dds-client",
"id": "9e3493ec054b986416b9222fc30585b86bbf98be",
"size": "2272",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "Client/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "935"
},
{
"name": "Python",
"bytes": "50242"
}
],
"symlink_target": ""
} |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("SGDRegressor" , "california_housing" , "hive")
| {
"content_hash": "73a2f56b14ce20f9c05d1c8b11526d0b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 66,
"avg_line_length": 34,
"alnum_prop": 0.7720588235294118,
"repo_name": "antoinecarme/sklearn2sql_heroku",
"id": "66cf4142feebfd94d15f4bef996a8187701fc67f",
"size": "136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regression/california_housing/ws_california_housing_SGDRegressor_hive_code_gen.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "507043"
},
{
"name": "Procfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "1021137"
},
{
"name": "R",
"bytes": "2521"
}
],
"symlink_target": ""
} |
import sys
import os
import datetime
import mygeotab
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MyGeotab Python SDK'
copyright = u'{}, {}'.format(datetime.datetime.utcnow().year, mygeotab.__author__)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mygeotab.__version__
# The full version, including alpha/beta/rc tags.
release = mygeotab.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MyGeotabPythonSDKdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MyGeotabPythonSDK.tex', u'MyGeotab Python SDK Documentation',
mygeotab.__author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mygeotabpythonsdk', u'MyGeotab Python SDK Documentation',
[mygeotab.__author__], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MyGeotabPythonSDK', u'MyGeotab Python SDK Documentation',
mygeotab.__author__, 'MyGeotabPythonSDK', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "543b8e91d1da6c1f3446fb32cb93e589",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 82,
"avg_line_length": 32.176,
"alnum_prop": 0.7059920437593237,
"repo_name": "Geotab/mygeotab-python",
"id": "8bb6ab0c60cf4f533f0ad7b53f0dc0decc59e36d",
"size": "8476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91210"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from web_apps.task import views
urlpatterns = patterns(
'',
url(r'^(?P<cluster>.+)/$', views.index, name='index'),
url(r'^$', views.index, name='index'),
)
| {
"content_hash": "41b73deb7bb9ef49c4912d7d4bbabd78",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 58,
"avg_line_length": 26.5,
"alnum_prop": 0.6320754716981132,
"repo_name": "fabrickit/fabkit",
"id": "cfbf66f8077e573cca1fa4f89ee2f1d0f1ea500f",
"size": "212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/webapp/web_apps/task/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4979"
},
{
"name": "CoffeeScript",
"bytes": "65442"
},
{
"name": "HTML",
"bytes": "40630"
},
{
"name": "JavaScript",
"bytes": "2315"
},
{
"name": "Mako",
"bytes": "988"
},
{
"name": "Python",
"bytes": "256382"
},
{
"name": "Shell",
"bytes": "2697"
}
],
"symlink_target": ""
} |
import os
import re
import shutil
import sys
import tempfile
from quickrelease.command import LoggedShellCommand
from quickrelease.deliverable import FindDeliverables, GetDeliverable, GetAllDeliverables
from quickrelease.exception import ReleaseFrameworkError
from quickrelease.step import Step
from quickrelease.utils import GetBuildPlatform, GetSHA1FileHash, JoinPaths
def PlatformCheck(conf):
thisPlatform = GetBuildPlatform()
supportedPlatforms = conf.Get('official_platforms', list)
if thisPlatform not in supportedPlatforms:
raise ReleaseFrameworkError("This example has only been tested on the "
"following platforms: %s. Your platform: %s" % (', '.join(
supportedPlatforms), thisPlatform))
def GetSourceDirRoot(conf):
return JoinPaths(conf.rootDir, conf.Get('source_root_dir'))
def GetObjDir(conf):
return JoinPaths(GetSourceDirRoot(conf), conf.Get('objdir'))
def VerifyFirefoxDownload(conf):
sourceFile = conf.Get('source_download_file')
sourceFileFullPath = JoinPaths(conf.rootDir, sourceFile)
if not os.path.isfile(sourceFileFullPath):
raise ValueError("Couldn't find downloaded firefox source code: %s" %
(sourceFileFullPath))
sha1SumsFile = os.path.basename(conf.Get('sha1_checksum_download_url'))
sha1SumHandle = open(sha1SumsFile, 'r')
sumFilePath = "./source/%s" % (sourceFile)
sourceSha1 = None
for line in sha1SumHandle.readlines():
(sha1, filename) = line.split(None, 1)
if filename.strip() == sumFilePath:
sourceSha1 = sha1
break
sha1SumHandle.close()
if sourceSha1 is None:
raise ValueError("Couldn't find entry for %s in %s" %
(sumFilePath, sha1SumsFile))
downloadSha1 = GetSHA1FileHash(sourceFileFullPath)
if sourceSha1 != downloadSha1:
raise ValueError("Download doesn't match expected SHA1: expected: %s; "
"download checksum: %s" % (sourceSha1, downloadSha1))
def VerifyFileList(fileList, commonPrefix=None):
for f in fileList:
testFile = f
if commonPrefix is not None:
testFile = JoinPaths(commonPrefix, f)
if not os.path.isfile(testFile):
raise ValueError(testFile)
def VerifyFirefoxBuildConfigured(conf):
autoconfTestFiles = conf.Get('autoconf_output_testfiles', list)
objDir = GetObjDir(conf)
try:
VerifyFileList(autoconfTestFiles, objDir)
except ValueError, ex:
raise ValueError("Autoconf test file not present: %s" % (ex))
confStatusFile = JoinPaths(objDir, conf.Get('autoconf_status_file'))
lastLine = None
confStatusFileHandle = open(confStatusFile, 'r')
for l in confStatusFileHandle:
lastLine = l
confStatusFileHandle.close()
if lastLine.strip() != 'exit 0':
raise ValueError("Last %s line didn't match successful exit: %s" %
(confStatusFile, lastLine))
def VerifySuccessfulFirefoxBuild(conf):
firefoxBuildTestFiles = conf.Get('build_test_files', list)
distDir = JoinPaths(GetObjDir(conf), 'dist')
try:
VerifyFileList(firefoxBuildTestFiles, distDir)
except ValueError, ex:
raise ValueError("Expected Firefox build output missing: %s" % (ex))
class FirefoxDownloadKeyAndSums(Step):
def __init__(self, *args, **kwargs):
Step.__init__(self, *args, **kwargs)
self.dlFiles = [ self.config.Get('pgp_key_download_url'),
self.config.Get('sha1_checksum_download_url'),
self.config.Get('sha1_checksum_sig_download_url') ]
def Preflight(self):
PlatformCheck(self.config)
def Execute(self):
for f in self.dlFiles:
cmd = [ self.config.GetConstant('WGET'),
'--progress=dot',
'--no-check-certificate',
f ]
rv = LoggedShellCommand(command=cmd)
def Verify(self):
for f in self.dlFiles:
# Probably shouldn't use os.path.basename here for realsies;
# should really use urlparse, but for this simple case, it
# seems to work fine.
fileName = os.path.basename(f)
if not os.path.isfile(fileName):
raise self.SimpleStepError("Key/checksum file %s missing." %
(fileName))
keyFile = JoinPaths(os.getcwd(),
os.path.basename(self.config.Get('sha1_checksum_sig_download_url')))
sha1SumsFile = JoinPaths(os.getcwd(),
os.path.basename(self.config.Get('sha1_checksum_download_url')))
validationReqd = self.config.Get('require_pgp_validation', bool)
# Could (should?) probably do this via PyCrypto, but for example-
# purposes, I'm feeling lazy.
gpgCommand = self.config.GetConstant('GPG')
cmd = [ gpgCommand,
'--verify',
keyFile,
sha1SumsFile ]
rv = None
try:
rv = LoggedShellCommand(command=cmd,
raiseErrors=False)
except ReleaseFrameworkError, ex:
if str(ex) == "Invalid command or working dir":
if validationReqd:
raise self.SimpleStepError("%s validation required, but "
"it looks like PGP isn't installed. Please install it."
% (gpgCommand))
print >> sys.stderr, ("It appears %s isn't installed. "
"Checksum cannot be considered valid. Continuing anyway..."
% (gpgCommand))
#print "%s, %d, %s, %s" % (rv, rv, rv.stdout, rv.stderr)
try:
if int(rv) != 0:
if re.search('No public key', rv.stderr[-1]):
error = ("Can't validate key; please import KEY (file: %s)"
% (keyFile))
else:
error = "%s failed; exit code: %d; stderr: %s" % (
gpgCommand, rv, '\n'.join(rv.stderr))
if validationReqd:
raise self.SimpleStepError("%s validation required: %s" %
(gpgCommand, error))
else:
print >> sys.stderr, error + "; continuing anyway."
else:
if (rv.stderr[1].find('Good signature') == -1 or
rv.stderr[1].find(self.config.Get('pgp_key_owner')) == -1):
raise self.SimpleStepError("%s claims %s is invalid: %s" %
(gpgCommand, keyFile, '\n'.join(rv.stderr)))
except IndexError:
raise self.SimpleStepError("Unexpected output from %s; bailing."
% gpgCommand)
print '\n'.join(rv.stderr)
class FirefoxDownloadSource(Step):
def Preflight(self):
PlatformCheck(self.config)
def Execute(self):
conf = self.config
cmd = [ conf.GetConstant('WGET'),
'--progress=dot',
'--no-check-certificate',
conf.Get('source_download_url') ]
rv = LoggedShellCommand(command=cmd)
def Verify(self):
try:
VerifyFirefoxDownload(self.config)
except ValueError, ex:
raise self.SimpleStepError(str(ex))
class FirefoxExtractSource(Step):
def Preflight(self):
conf = self.config
PlatformCheck(conf)
try:
VerifyFirefoxDownload(self.config)
except ValueError, ex:
raise self.SimpleStepError(str(ex))
def Execute(self):
conf = self.config
sourceTarball = JoinPaths(conf.rootDir,
os.path.basename(conf.Get('source_download_url')))
cmd = [ conf.GetConstant('TAR'),
'-xvjf',
sourceTarball ]
rv = LoggedShellCommand(command=cmd,
workdir=conf.rootDir)
def Verify(self):
conf = self.config
firefoxTestFiles = conf.Get('source_test_files', list)
sourceRoot = GetSourceDirRoot(conf)
try:
VerifyFileList(firefoxTestFiles, sourceRoot)
except ValueError, ex:
raise self.SimpleStepError("Missing Firefox source file: %s" % (ex))
class FirefoxConfigureBuild(Step):
def _GetMozconfigFilename(self):
conf = self.config
return JoinPaths(GetSourceDirRoot(conf), conf.Get('mozconfig_file'))
def Preflight(self):
PlatformCheck(self.config)
mcFile = self._GetMozconfigFilename()
if os.path.isfile(mcFile):
raise self.SimpleStepError("Existing Mozconfig is in the way: %s" %
mcFile)
def Execute(self):
conf = self.config
mozConfigHandle = open(self._GetMozconfigFilename(), 'w')
for line in re.split('\n+', self.config.Get('mozconfig_lines')):
print >> mozConfigHandle, line.strip()
mozConfigHandle.close()
LoggedShellCommand(command=[conf.GetConstant('MAKE'),
'-f', 'client.mk', 'configure'],
workdir=GetSourceDirRoot(conf),
verbose=True )
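# For reference, 'mozconfig_lines' in the config is expected to hold a block of
# newline-separated mozconfig directives, e.g. (hypothetical values):
#
#   mk_add_options MOZ_OBJDIR=@TOPSRCDIR@/obj-firefox
#   ac_add_options --enable-optimize
#
# Each line is stripped and written verbatim to the mozconfig file above before
# "make -f client.mk configure" runs.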
def Verify(self):
try:
VerifyFirefoxBuildConfigured(self.config)
except ValueError, ex:
raise self.SimpleStepError(str(ex))
class FirefoxDoBuild(Step):
def Preflight(self):
conf = self.config
PlatformCheck(conf)
try:
VerifyFirefoxBuildConfigured(self.config)
except ValueError, ex:
raise self.SimpleStepError(str(ex))
def Execute(self):
conf = self.config
rv = LoggedShellCommand(command=[conf.GetConstant('MAKE')],
workdir=GetObjDir(conf),
timeout=7200)
print "\n\n***\nFull firefox build took %d seconds.\n***\n\n" % (
rv.runningtime)
def Verify(self):
try:
VerifySuccessfulFirefoxBuild(self.config)
except ValueError, ex:
raise self.SimpleStepError(str(ex))
class FirefoxDoInstallerBuild(Step):
def Preflight(self):
conf = self.config
PlatformCheck(conf)
try:
VerifySuccessfulFirefoxBuild(self.config)
except ValueError, ex:
raise self.SimpleStepError(str(ex))
def Execute(self):
conf = self.config
rv = LoggedShellCommand(command=[conf.GetConstant('MAKE'),
'-C', 'browser/installer'],
workdir=GetObjDir(conf))
def Verify(self):
conf = self.config
distDir = JoinPaths(GetObjDir(conf), 'dist')
delivsFound = FindDeliverables(distDir, conf)
if delivsFound <= 0:
raise self.SimpleStepError("No deliverables found after installer "
"build?")
else:
print "Deliverables found in %s: %d" % (distDir, delivsFound)
for d in GetAllDeliverables():
print "Name %s -> %s" % (d.name, d.file)
installerObj = GetDeliverable('installer:linux:en-US')
if installerObj is None:
raise self.SimpleStepError("No installer found after installer "
"build")
else:
tmpDir = tempfile.gettempdir()
print "Copying installer %s to %s" % (installerObj, tmpDir)
shutil.copy(installerObj.fileName, tmpDir)
# You probably don't need to do this, but as an example of some
# of the utility functions...
ourInstallerHash = GetSHA1FileHash(installerObj.fileName)
tmpInstallerHash = GetSHA1FileHash(JoinPaths(tmpDir,
installerObj.file))
if ourInstallerHash != tmpInstallerHash:
raise self.SimpleStepError("Hashes do not match; orig: %s; copy: %s"
% (ourInstallerHash, tmpInstallerHash))
| {
"content_hash": "af1c2549fb1ccd2bcb5da2c6a0fb2510",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 89,
"avg_line_length": 33.92,
"alnum_prop": 0.5940026954177897,
"repo_name": "preed/quickrelease",
"id": "97ee29e46fc2dccfe53ee6614588504b2961e1c7",
"size": "11873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/firefox/FirefoxSampleSteps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201128"
}
],
"symlink_target": ""
} |
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.db import models
from event import Event
from notificationtemplateconfig import NotificationTemplateConfig
class PublicFeedItem(models.Model):
event = models.ForeignKey(Event)
template_config = models.ForeignKey(NotificationTemplateConfig)
context = models.CharField(default=u'default', max_length=255)
seen = models.BooleanField(default=False)
site = models.ForeignKey(Site)
objects = models.Manager()
on_site = CurrentSiteManager()
def __init__(self, *args, **kwargs):
super(PublicFeedItem, self).__init__(*args, **kwargs)
if not self.pk and not self.site_id:
self.site_id = self.event.site_id or Site.objects.get_current().pk
class Meta:
app_label = 'notifications'
unique_together = ('event', 'context')
def __unicode__(self):
return u"%s - %s" % (self.event.type.name, unicode(self.event.target_object))
| {
"content_hash": "b4ff11f70336a646a51caa4baa942352",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 35.37931034482759,
"alnum_prop": 0.7017543859649122,
"repo_name": "suselrd/django-notifications",
"id": "47f0153f37175a9b8e9aaa136fc8c1d3b05543cd",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifications/models/publicfeeditem.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "248588"
}
],
"symlink_target": ""
} |
import copy
from rl_coach.agents.ddpg_agent import DDPGAgentParameters
from rl_coach.base_parameters import VisualizationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.carla_environment import CarlaEnvironmentParameters, CameraTypes, CarlaInputFilter
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
####################
# Graph Scheduling #
####################
schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(10000000000)
schedule_params.steps_between_evaluation_periods = EnvironmentEpisodes(20)
schedule_params.evaluation_steps = EnvironmentEpisodes(1)
schedule_params.heatup_steps = EnvironmentSteps(1000)
#########
# Agent #
#########
agent_params = DDPGAgentParameters()
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(4)
# front camera
agent_params.network_wrappers['actor'].input_embedders_parameters['forward_camera'] = \
agent_params.network_wrappers['actor'].input_embedders_parameters.pop('observation')
agent_params.network_wrappers['critic'].input_embedders_parameters['forward_camera'] = \
agent_params.network_wrappers['critic'].input_embedders_parameters.pop('observation')
# left camera
agent_params.network_wrappers['actor'].input_embedders_parameters['left_camera'] = \
copy.deepcopy(agent_params.network_wrappers['actor'].input_embedders_parameters['forward_camera'])
agent_params.network_wrappers['critic'].input_embedders_parameters['left_camera'] = \
copy.deepcopy(agent_params.network_wrappers['critic'].input_embedders_parameters['forward_camera'])
# right camera
agent_params.network_wrappers['actor'].input_embedders_parameters['right_camera'] = \
copy.deepcopy(agent_params.network_wrappers['actor'].input_embedders_parameters['forward_camera'])
agent_params.network_wrappers['critic'].input_embedders_parameters['right_camera'] = \
copy.deepcopy(agent_params.network_wrappers['critic'].input_embedders_parameters['forward_camera'])
agent_params.input_filter = CarlaInputFilter()
agent_params.input_filter.copy_filters_from_one_observation_to_another('forward_camera', 'left_camera')
agent_params.input_filter.copy_filters_from_one_observation_to_another('forward_camera', 'right_camera')
###############
# Environment #
###############
env_params = CarlaEnvironmentParameters()
env_params.cameras = [CameraTypes.FRONT, CameraTypes.LEFT, CameraTypes.RIGHT]
graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
schedule_params=schedule_params, vis_params=VisualizationParameters())
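# Hedged usage note (not part of the original preset): rl_coach presets like
# this one are normally launched through the coach command line, e.g.
#
#   coach -p CARLA_3_Cameras_DDPG
#
# which picks up the module-level graph_manager defined above.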
| {
"content_hash": "0430208f5721198de987763709bef618",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 109,
"avg_line_length": 50.907407407407405,
"alnum_prop": 0.766460531102219,
"repo_name": "NervanaSystems/coach",
"id": "d1da7a7d0d3b908a16787076ae1e2188a4e0b00a",
"size": "2749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_coach/presets/CARLA_3_Cameras_DDPG.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "CSS",
"bytes": "6493"
},
{
"name": "Dockerfile",
"bytes": "1118"
},
{
"name": "HTML",
"bytes": "161"
},
{
"name": "Jupyter Notebook",
"bytes": "91174"
},
{
"name": "Makefile",
"bytes": "5036"
},
{
"name": "Python",
"bytes": "1926733"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
} |
"""Class for managing the motor state."""
import logging
from constants import \
LEFT_MOTOR, RIGHT_MOTOR,\
MOTOR_DIRECTION_FORWARD, MOTOR_DIRECTION_BACKWARD
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.getLevelName('INFO'))
class MotorController:
"""Object to manage the motors."""
def __init__(self, driver):
"""Construct a MotorManager."""
self.driver = driver
LOGGER.info("Motor manager initialized")
def set_speed(self, motor, speed, direction):
"""Set the speed of a motor pin."""
if motor not in (LEFT_MOTOR, RIGHT_MOTOR):
LOGGER.error("Unknown motor %s", motor)
return
try:
speed = int(speed)
except ValueError:
LOGGER.error('Speed %s cannot be parsed as an integer', speed)
return
if speed < 0:
LOGGER.warning("Inverting direction %s because "
"of negative motor speed %s",
direction, speed)
speed = abs(speed)
direction = MOTOR_DIRECTION_BACKWARD \
if direction == MOTOR_DIRECTION_FORWARD \
else MOTOR_DIRECTION_FORWARD
if direction == MOTOR_DIRECTION_FORWARD:
driver_direction = self.driver.DIRECTION_FORWARD
elif direction == MOTOR_DIRECTION_BACKWARD:
driver_direction = self.driver.DIRECTION_BACKWARD
else:
LOGGER.error("Invalid direction %s", direction)
return
LOGGER.info("Setting %s to %s with direction %s",
motor, speed, direction)
if motor == LEFT_MOTOR:
self.driver.set_left_speed(speed, driver_direction)
else:
self.driver.set_right_speed(speed, driver_direction)
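# Hedged usage sketch (not part of the original module): the controller only
# needs a driver exposing DIRECTION_FORWARD/DIRECTION_BACKWARD constants and
# set_left_speed/set_right_speed methods, so a stub is enough to exercise the
# negative-speed inversion above:
#
#   class StubDriver:
#       DIRECTION_FORWARD = 0
#       DIRECTION_BACKWARD = 1
#       def set_left_speed(self, speed, direction): print('L', speed, direction)
#       def set_right_speed(self, speed, direction): print('R', speed, direction)
#
#   MotorController(StubDriver()).set_speed(LEFT_MOTOR, -40, MOTOR_DIRECTION_FORWARD)
#   # logs a warning, then drives the left motor at 40 in the backward direction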
| {
"content_hash": "cc1a8dc23f8afa07b1b80ce11532ea6f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 34.64150943396226,
"alnum_prop": 0.5887799564270153,
"repo_name": "aninternetof/rover-code",
"id": "0505eaea006b0afa3c79fec2bc7bb348e8f9bcf4",
"size": "1836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rovercode/motor_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30711"
},
{
"name": "HTML",
"bytes": "350136"
},
{
"name": "PHP",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "383"
}
],
"symlink_target": ""
} |
import helper
greeting("develop test")
| {
"content_hash": "ba89ad2f1b8f43e8dcf2384c980168fa",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 24,
"avg_line_length": 13.333333333333334,
"alnum_prop": 0.775,
"repo_name": "mbe9a/cs3240-labdemo",
"id": "79cfbd50f9a2b1efe31bb61606220bd853156b2b",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "228"
}
],
"symlink_target": ""
} |
import argparse
import functools
import json
import math
import os
import sys
import urllib.request
# MIN_VERSION is the earliest working version of the updater for self-update
# testing. If a backwards-incompatible change to the updater is made, it may be
# necessary to increase the version.
MIN_VERSION = 1064314
def get_platform():
return 'Win'
def int_or_inf(v):
try:
return int(v)
except ValueError:
return -float('inf')
def fetch(platform, minimum, minimum_lexographic):
"""
Queries GCS for versions and returns a tuple (min, max), where min is the
(numerically) lowest version greater than `minimum` returned by GCS, and
max is the greatest (lexicographically) version returned by GCS. Because GCS
compares and returns items in lexicographic order, GCS may return no eligible
min items. (For example, if minimum = 200, it could return 30, 31, 32...)
In this case, min will be float('inf') and the caller should query with max
as the new minimum_lexographic.
"""
return functools.reduce(
lambda a, b: (min(a[0], int(b)) if int_or_inf(b) > minimum else a[0],
max(a[1], b)),
map(
lambda s: s[len(platform) + 1:-1],
json.load(
urllib.request.urlopen(
'https://storage.googleapis.com/storage/v1/b/'
'chromium-browser-snapshots/o?prefix=%s%%2F&startOffset=%s'
'%%2F%s&fields=prefixes&delimiter=%%2F' %
(platform, platform,
minimum_lexographic)))['prefixes']),
(float('inf'), ''))
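# Hedged illustration (not part of the original script) of the pagination
# issue described in the docstring: version strings sort lexicographically,
# so a page can mix numerically smaller versions with eligible ones. The
# reduction above keeps the numeric minimum above `minimum` and the
# lexicographic maximum, which seeds the next query:
#
#   candidates = ['30', '31', '250']
#   functools.reduce(
#       lambda a, b: (min(a[0], int(b)) if int_or_inf(b) > 200 else a[0],
#                     max(a[1], b)),
#       candidates, (float('inf'), ''))
#   # -> (250, '31'): 250 is the numeric minimum above 200, '31' is the
#   #    lexicographic maximum used as the next minimum_lexographic.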
def find(platform, minimum, maximum):
"""
Returns a version from GCS closest to `minimum` but not more than `maximum`
for `platform`. May return maximum even if it does not exist in GCS.
"""
found_min = maximum
pivot = str(minimum)
while pivot < str(maximum):
found, pivot = fetch(platform, minimum, pivot)
found_min = min(found_min, found)
return found_min
def lastDatum(platform):
"""
Returns a version from GCS that only changes roughly every 1000 versions: the
lowest available version at or above the latest version rounded down to a
multiple of 1000, but never below MIN_VERSION.
"""
latest = int(
urllib.request.urlopen(
'https://storage.googleapis.com/storage/v1/b/'
'chromium-browser-snapshots/o/%s%%2FLAST_CHANGE?alt=media' % platform).read())
return max(MIN_VERSION,
find(platform, latest - latest % 1000, latest))
def print_latest():
print(lastDatum(get_platform()))
def get_url():
print(
json.dumps({
'url': [
'https://storage.googleapis.com/storage/v1/b/'
'chromium-browser-snapshots/o/%s%%2F%s%%2Fupdater.zip?alt=media'
% (get_platform(), os.environ['_3PP_VERSION'])
],
'ext':
'.zip',
'name': ['updater.zip']
}))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
sub.add_parser('latest').set_defaults(func=lambda _opts: print_latest())
sub.add_parser('get_url').set_defaults(func=lambda _opts: get_url())
opts = ap.parse_args()
return opts.func(opts)
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "e3c3278c678fd6f2b494db27fc45363c",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 90,
"avg_line_length": 31.21359223300971,
"alnum_prop": 0.6024883359253499,
"repo_name": "chromium/chromium",
"id": "6e134000b8e32e11d9d1ad3f47d346af5ea97e18",
"size": "3379",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/updater/chromium_win_x86/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import pytest
import flask_restful_swagger
from flask_restful_swagger.swagger import StaticFiles
try:
from unittest.mock import patch
except ImportError:
from mock import patch
test_fixtures_renders = [
["index.html", None, None],
["o2c.html", None, None],
["swagger-ui.js", None, None],
["swagger-ui.min.js", None, None],
["lib/swagger-oauth.js", None, None],
]
@patch("flask_restful_swagger.swagger.render_page")
@patch("flask_restful_swagger.swagger._get_current_registry")
@pytest.mark.parametrize("dir1,dir2,dir3", test_fixtures_renders)
def test_get_valid_content_renders(registry, render_page, dir1, dir2, dir3):
static_files = StaticFiles()
registry.return_value = {"spec_endpoint_path": "dummy"}
static_files.get(dir1, dir2, dir3)
assert render_page.call_args[0] == (dir1, {"resource_list_url": "dummy"})
test_fixtures_none = [[None, None, None]]
@patch("flask_restful_swagger.swagger.render_page")
@patch("flask_restful_swagger.swagger._get_current_registry")
@pytest.mark.parametrize("dir1,dir2,dir3", test_fixtures_none)
def test_get_valid_content_renders_none(
registry, render_page, dir1, dir2, dir3
):
static_files = StaticFiles()
registry.return_value = {"spec_endpoint_path": "dummy"}
static_files.get(dir1, dir2, dir3)
assert render_page.call_args[0] == (
"index.html",
{"resource_list_url": "dummy"},
)
test_fixtures_mimes = [
["index2.html", "text/plain"],
["image.gif", "image/gif"],
["image.png", "image/png"],
["javascript.js", "text/javascript"],
["style.css", "text/css"],
]
@patch("flask_restful_swagger.swagger.Response", autospec=True)
@patch("flask_restful_swagger.swagger.open")
@patch("flask_restful_swagger.swagger.os.path.exists")
@patch("flask_restful_swagger.swagger._get_current_registry")
@pytest.mark.parametrize("dir1,mime", test_fixtures_mimes)
def test_get_valid_content_mime(
registry, mock_exists, mock_open, response, dir1, mime
):
mock_open.return_value = "file_handle"
mock_exists.return_value = True
static_files = StaticFiles()
static_files.get(dir1, None, None)
assert mock_exists.called
assert mock_open.called
args, kwargs = response.call_args_list[0]
assert args == ("file_handle",)
assert kwargs == {"mimetype": mime}
test_fixtures_mimes_does_not_exist = ["index2.html"]
@patch("flask_restful_swagger.swagger.os.path.exists")
@patch("flask_restful_swagger.swagger._get_current_registry")
@patch("flask_restful_swagger.swagger.abort")
@pytest.mark.parametrize("dir1", test_fixtures_mimes_does_not_exist)
def test_get_valid_content_mime_file_does_not_exist(
abort, registry, mock_exists, dir1
):
mock_exists.return_value = False
static_files = StaticFiles()
static_files.get(dir1, None, None)
assert mock_exists.called
assert abort.called
test_fixtures_paths = [
["paths", "index2.html", None, "paths/index2.html"],
["paths", "more_paths", "index2.html", "paths/more_paths/index2.html"],
]
@patch("flask_restful_swagger.swagger.Response", autospec=True)
@patch("flask_restful_swagger.swagger.os.path.exists")
@patch("flask_restful_swagger.swagger.open")
@patch("flask_restful_swagger.swagger.render_page")
@patch("flask_restful_swagger.swagger._get_current_registry")
@pytest.mark.parametrize("dir1,dir2,dir3,expected", test_fixtures_paths)
def test_get_valid_content_paths(
registry,
render_page,
mock_open,
mock_exists,
response,
dir1,
dir2,
dir3,
expected,
):
mock_open.return_value = "file_handle"
mock_exists.return_value = True
static_files = StaticFiles()
registry.return_value = {"spec_endpoint_path": "dummy"}
static_files.get(dir1, dir2, dir3)
module_path = os.path.dirname(flask_restful_swagger.__file__)
static_files = "static"
full_path = os.path.join(module_path, static_files, expected)
assert mock_exists.called
assert mock_open.call_args_list[0][0][0] == full_path
args, kwargs = response.call_args_list[0]
assert args == ("file_handle",)
assert kwargs == {"mimetype": "text/plain"}
| {
"content_hash": "a5e0840c05a3f530652cca6f3deabf97",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 77,
"avg_line_length": 28.895833333333332,
"alnum_prop": 0.6887767363614515,
"repo_name": "rantav/flask-restful-swagger",
"id": "b188ccae79c408e707b596e4a98292c9d560e7d7",
"size": "4161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_staticfiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "89588"
},
{
"name": "HTML",
"bytes": "9503"
},
{
"name": "JavaScript",
"bytes": "961284"
},
{
"name": "Makefile",
"bytes": "166"
},
{
"name": "Python",
"bytes": "18195"
}
],
"symlink_target": ""
} |
import discord
import cabbagerc as rc
from discord.ext import commands
from util.FlagFramework import FlagFramework
from util.Logger import Logger
from sql.cabbagebase import CabbageBase
from datetime import datetime
class StarThresholdError(Exception):
''' Thrown when a caller attempts to set the starboard threshold (the
number of stars required for the bot to post the message on the
starboard) to an invalid value.
'''
pass
class Starboard:
''' Class representing a server's Starboard '''
def __init__(self, server, bot):
self.log = Logger()
self.base = CabbageBase()
self.flags = FlagFramework()
self.server = server
self.bot = bot
self.sbChanid = None
self.sbChannel = None
self.thresh = None
if self.flags.hasFlag('channel', 'star', self.server.id):
self.sbChanid = self.flags.getFlag('channel', 'star', self.server.id)[0]['flag']
self.sbChannel = self.server.get_channel(self.sbChanid)
if self.flags.hasFlag('thresh', 'star', self.server.id):
self.thresh = self.flags.getFlag('thresh', 'star', self.server.id)[0]['flag']
def isWorking(self):
''' Returns true if the server's starboard has been set up and is
valid.
'''
return self.sbChannel and self.thresh and self.thresh > 0
def reset(self, channel, thresh):
''' Resets the starboard parameters to the provided ones.
'''
self.sbChannel = channel
if thresh > 0:
self.thresh = thresh
else:
raise StarThresholdError
self.log.log('Starboard for server ' + self.server.id + ' updated: parameters changed', 'star', 7)
self._update()
def _update(self):
''' Updates the server flags and database entries for the starboard.
Note that this does NOT check any messages for new stars or
update starboard messages.
'''
if self.thresh <= 0:
raise StarThresholdError
self.flags.tset('channel', 'star', self.server.id, self.sbChannel.id)
self.flags.iset('thresh', 'star', self.server.id, self.thresh)
self.sbChanid = self.sbChannel.id
def getStarboardMessage(self, chanid, messid):
''' Return a StarMessage object for the provided message.
'''
q = self.base.query('starboard', ('original_message_channel','starboard_message_messid'), (('server',int(self.server.id)),('original_message_channel',int(chanid)),('original_message_messid',int(messid))))
if q and len(q) > 0:
sm = StarMessage(self.server.id, q[0][0], messid, q[0][1])
else:
sm = StarMessage(self.server.id, chanid, messid)
return sm
def _determineAppropriateStarEmoji(self, numStars):
''' Determines the appropriate star emoji
'''
if numStars < self.thresh:
return '⚫'
elif numStars < (1.5 * self.thresh):
return '⭐'
elif numStars < (2 * self.thresh):
return '✴'
elif numStars < (3 * self.thresh):
return '🌠'
else:
return '🌌'
async def _postOrUpdateStarboardMessage(self, msg, channel):
''' Posts a message to the starboard, or (if it is already there)
updates it to reflect changes in star totals.
'''
srv = self.bot.get_server(msg.server)
cha = channel
mes = await self.bot.get_message(cha, msg.messid)
sbMessid = None
if msg.starboardMessid:
sbMessid = msg.starboardMessid
else:
# The message indicates that it is not yet on the starboard, but
# check anyway.
q = self.base.query('starboard', ('starboard_message_messid',), (('server',int(self.server.id)),('original_message_channel',int(self.sbChanid)),('original_message_messid',int(msg.messid))))
if q and len(q) > 0:
# It was actually on the starboard.
sbMessid = q[0][0]
newEmbed = msg.constructEmbed(mes)
numStars = msg.getStars()
header = '**' + self._determineAppropriateStarEmoji(numStars) + str(numStars) + ' ' + cha.mention + '**'
if sbMessid:
sbMessage = await self.bot.get_message(self.sbChannel, sbMessid)
await self.bot.edit_message(sbMessage, header, embed=newEmbed)
else:
newSbMes = await self.bot.send_message(self.sbChannel, header, embed=newEmbed)
cmd = 'INSERT INTO starboard (server, starboard, starboard_message_messid, original_message_channel, original_message_messid, original_message_sent) VALUES (%s,%s,%s,%s,%s,%s)'
cur = self.base.getCursor()
cur.execute(cmd, (int(self.server.id), int(self.sbChanid), int(newSbMes.id), int(mes.channel.id), int(mes.id), mes.timestamp))
self.base.commit()
cur.close()
class StarMessage:
''' Class representing a message with stars '''
def __init__(self, server, chanid, messid, starboardMessid=None):
self.base = CabbageBase()
self.server = server
self.chanid = chanid
self.messid = messid
self.starboardMessid = starboardMessid
def star(self, uid):
''' Adds a star to a message, as long as it has not been starred by
the same user before.
'''
if not self.hasStarred(uid):
cur = self.base.getCursor()
insertString = 'INSERT INTO stars (server,chanid,messid,starrer) VALUES (%s,%s,%s,%s);'
cur.execute(insertString, (self.server, self.chanid, self.messid, uid))
self.base.commit()
cur.close()
def unstar(self, uid):
''' Removes a star from a message.
'''
if self.hasStarred(uid):
cur = self.base.getCursor()
delString = 'DELETE FROM ONLY stars WHERE server=%s AND chanid=%s AND messid=%s AND starrer=%s'
cur.execute(delString, (self.server, self.chanid, self.messid, uid))
self.base.commit()
cur.close()
def getStars(self):
''' Returns the number of unique users who have starred the message.
'''
res = self.base.query('stars', ('server',), (('server', int(self.server)), ('messid', int(self.messid))))
return len(res)
def hasStarred(self, uid):
''' Determines whether the provided user has previously starred the
message.
'''
res = self.base.query('stars', ('server',), (('server', int(self.server)), ('messid', int(self.messid)), ('starrer', int(uid))))
return len(res) > 0
def constructEmbed(self, messageObject):
''' Constructs the embed object to be used in the starboard message.
'''
embed = discord.Embed(colour=discord.Colour(0x7f3e96), description=messageObject.content, timestamp=messageObject.timestamp)
embed.set_author(name=messageObject.author.name, icon_url=messageObject.author.avatar_url)
if messageObject.attachments and len(messageObject.attachments) > 0:
attachment = messageObject.attachments[0]
if attachment['filename'].lower().endswith(('gif', 'jpg', 'jpeg', 'png')):
embed.set_image(url=attachment['url'])
else:
embed.add_field(name='Also attached',value=str(attachment['filename']))
return embed
async def updateStarboard(self, serverObject, channelObject, bot):
''' Check if we're above the threshold value for the starboard. If so,
ask to be posted.
'''
s = Starboard(serverObject, bot)
if self.getStars() >= s.thresh:
await s._postOrUpdateStarboardMessage(self, channelObject)
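# Hedged flow sketch (not part of the original module): a reaction handler
# would typically tie the two classes together roughly like this:
#
#   board = Starboard(server, bot)
#   message = board.getStarboardMessage(channel.id, reacted_message_id)
#   message.star(user.id)
#   await message.updateStarboard(server, channel, bot)
#   # updateStarboard() re-checks the threshold and, if it is met, posts or
#   # edits the starboard entry via _postOrUpdateStarboardMessage().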
| {
"content_hash": "0cafc4e8b36473eb9bf22655109cede8",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 206,
"avg_line_length": 31.65740740740741,
"alnum_prop": 0.6937701082187774,
"repo_name": "alexandershuping/cabbage-bot",
"id": "f3634b2633568878fa4953d0871a976d70c6ce06",
"size": "6850",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "util/StarFramework.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78998"
}
],
"symlink_target": ""
} |
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from TCA.administration.models import Father
from TCA.administration.utils import get_user_type
from .models import Stream
@login_required
def stream(request, grade_id):
user = request.user
user_type = get_user_type(user)
if not (user_type == 'teacher' or user.is_staff):
if user_type == 'student':
raise Http404('You are not authorized to view this page.')
else:
father = Father.objects.get(user=user)
sons = father.sons.all()
grades = [int(s.grade.id) for s in sons]
if int(grade_id) not in grades:
raise Http404('You are not authorized to view this page.')
stream = get_object_or_404(Stream, grade=grade_id)
context = {'stream': stream}
return render(request, 'stream/stream.html', context)
@login_required
def allowed_streams(request):
"""Return a list of the allowed streams a user can see."""
user = request.user
user_type = get_user_type(user)
if user_type in ['teacher', 'admin']:
streams = Stream.objects.all()
elif user_type == 'father':
father = Father.objects.get(user=user)
sons = father.sons.all()
grades = [s.grade.id for s in sons]
streams = Stream.objects.filter(grade__id__in=grades)
else:
raise Http404('You are not authorized to view this page.')
context = {'streams': streams}
return render(request, 'stream/allowed_streams.html', context)
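# Hedged wiring sketch (not part of the original module): the views above
# would typically be exposed with the grade id captured from the URL, e.g.
# in a hypothetical urls.py:
#
#   url(r'^stream/(?P<grade_id>\d+)/$', views.stream, name='stream'),
#   url(r'^streams/$', views.allowed_streams, name='allowed_streams'),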
| {
"content_hash": "368d2800eab714066bd9b3f5afc0b193",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 73,
"avg_line_length": 36.47727272727273,
"alnum_prop": 0.6560747663551402,
"repo_name": "JosmanPS/tsebaoth-christian-academy",
"id": "b4647ce5665ab806415712051fc775290427c530",
"size": "1639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TCA/stream/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155074"
},
{
"name": "HTML",
"bytes": "51352"
},
{
"name": "JavaScript",
"bytes": "302885"
},
{
"name": "Python",
"bytes": "88031"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MenuPluginSettings.template'
db.add_column('cmsplugin_menupluginsettings', 'template', self.gf('django.db.models.fields.CharField')(default='cmsplugin_embeddedmenu/layouts/default.html', max_length=256), keep_default=False)
def backwards(self, orm):
# Deleting field 'MenuPluginSettings.template'
db.delete_column('cmsplugin_menupluginsettings', 'template')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_embeddedmenu.menupluginsettings': {
'Meta': {'object_name': 'MenuPluginSettings', 'db_table': "'cmsplugin_menupluginsettings'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'depth': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'root': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['cms.Page']"}),
'start_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'cmsplugin_embeddedmenu/layouts/default.html'", 'max_length': '256'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cmsplugin_embeddedmenu']
| {
"content_hash": "0e81a6b521e36315637f1a1b47966522",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 202,
"avg_line_length": 78.33720930232558,
"alnum_prop": 0.5723615852753451,
"repo_name": "airtonix/cmsplugin-embedded-menu",
"id": "6417e7dc74cb804ad04d581ce480b709d5a0ac87",
"size": "6755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_embeddedmenu/migrations/0002_auto__add_field_menupluginsettings_template.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "54441"
}
],
"symlink_target": ""
} |
"""
Implements distribution file storage.
"""
from os import makedirs, remove, walk
from os.path import basename, exists, isdir, join
from magic import from_buffer
from cheddar.model.versions import is_pre_release
class DistributionStorage(object):
"""
File system storage with release/pre-release partitioning.
"""
def __init__(self, base_dir, logger):
"""
Initialize storage.
:param base_dir: root directory for storage
"""
self.logger = logger
self.base_dir = base_dir
self.release_dir = join(base_dir, "releases")
self.pre_release_dir = join(base_dir, "pre-releases")
self._make_base_dirs()
def exists(self, name):
return exists(self.compute_path(name))
def read(self, name):
"""
Read entry for name from storage.
:returns: content data and content type as a tuple, or None if no entry exists
"""
if not self.exists(name):
self.logger.debug("No file exists for: {}".format(name))
return None
with open(self.compute_path(name)) as file_:
content_data = file_.read()
content_type = from_buffer(content_data, mime=True)
self.logger.debug("Computed content type: {} for: {}".format(content_type, name))
return content_data, content_type
def write(self, name, data):
"""
Write entry to storage.
"""
self._make_base_dirs()
path = self.compute_path(name)
with open(path, "wb") as file_:
file_.write(data)
self.logger.debug("Wrote file for: {}".format(name))
return path
def remove(self, name):
"""
Remove entry from storage.
"""
try:
remove(self.compute_path(name))
self.logger.debug("Removed file for: {}".format(name))
return True
except OSError:
self.logger.debug("Unable to remove file for: {}".format(name))
return False
def compute_path(self, name):
"""
Compute file system path.
Path incorporates "pre-release" or "release" to easily
differentiate released distributions for backup.
"""
base_dir = self.pre_release_dir if is_pre_release(name) else self.release_dir
path = join(base_dir, basename(name))
self.logger.debug("Computed path: {} for: {}".format(path, name))
return path
def __iter__(self):
for dirpath, _, filenames in walk(self.base_dir):
for filename in filenames:
yield join(dirpath, filename)
def _make_base_dirs(self):
"""
Ensure that base dirs exists.
"""
for dir_ in [self.release_dir, self.pre_release_dir]:
if not isdir(dir_):
makedirs(dir_)
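# Hedged usage sketch (not part of the original module): write() partitions
# files by is_pre_release(), so release and pre-release uploads land in
# separate directories under the same base:
#
#   import logging
#   storage = DistributionStorage('/tmp/cheddar-data', logging.getLogger(__name__))
#   storage.write('example-1.0.tar.gz', b'...')     # .../releases/example-1.0.tar.gz
#   storage.write('example-1.1b1.tar.gz', b'...')   # .../pre-releases/example-1.1b1.tar.gz
#   data, content_type = storage.read('example-1.0.tar.gz')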
| {
"content_hash": "59973c83606427b710878bb372463e71",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 93,
"avg_line_length": 30.21276595744681,
"alnum_prop": 0.5753521126760563,
"repo_name": "jessemyers/cheddar",
"id": "6584f00428df1ea78537d74f67534e1934e17b6a",
"size": "2840",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cheddar/index/storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4032"
},
{
"name": "Python",
"bytes": "74930"
}
],
"symlink_target": ""
} |
"""SIP EC config_db event class."""
import datetime
class Event:
"""Event class."""
def __init__(self,
event_id: str,
event_type: str,
event_data: dict = None,
event_origin: str = None,
event_timestamp: datetime.datetime = None,
object_type: str = None,
object_id: str = None,
object_key: str = None):
"""Create an Event object.
Args:
event_id (str): Event Identifier
event_type (str): Type of event
event_data (dict, optional): Event data
event_origin (str, optional): Event origin
event_timestamp (datetime.datetime, optional): Created time
object_type (str, optional): Object type associated with the event
object_id (str, optional): Object Id associated with the event
object_key (str, optional): Object key,
"""
if event_timestamp is None:
event_timestamp = datetime.datetime.utcnow().isoformat()
self._event = dict(
id=event_id,
type=event_type,
data=event_data,
origin=event_origin,
timestamp=event_timestamp,
object_type=object_type,
object_id=object_id,
object_key=object_key
)
@classmethod
def from_config(cls, config: dict):
"""Create an event object from an event dictionary object.
Args:
config (dict): Event Configuration dictionary.
"""
timestamp = config.get('timestamp', None)
return cls(config.get('id'),
config.get('type'),
config.get('data', dict()),
config.get('origin', None),
timestamp,
config.get('object_type', None),
config.get('object_id', None),
config.get('object_key', None))
def __str__(self):
"""Generate the 'informal' string representation.
Used by the print statement.
"""
return str(self._event)
def __repr__(self):
"""Generate the 'official' string representation.
eg. used when printing lists of objects.
"""
return '{}'.format(self._event.get('id'))
@property
def config(self) -> dict:
"""Event configuration dictionary."""
return self._event
# pylint: disable=invalid-name
@property
def id(self) -> str:
"""Return the event id."""
return self._event.get('id')
@property
def type(self) -> str:
"""Return the type of event."""
return self._event.get('type')
@property
def data(self) -> dict:
"""Return the event data."""
return self._event.get('data')
@property
def origin(self) -> str:
"""Return the event data."""
return self._event.get('origin')
@property
def timestamp(self) -> str:
"""Return the event data."""
return self._event.get('timestamp')
@property
def object_type(self) -> str:
"""Return the object type associated with the event."""
return self._event.get('object_type')
@property
def object_id(self) -> str:
"""Return the object id associated with the event."""
return self._event.get('object_id')
@property
def object_key(self) -> str:
"""Return the object key associated with the event."""
return self._event.get('object_key')
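# Hedged usage sketch (not part of the original module): an Event can be
# rebuilt from its own configuration dictionary, which is how it would
# round-trip through the configuration database:
#
#   event = Event('evt-01', 'status_changed', event_data={'state': 'on'},
#                 object_type='subarray', object_id='subarray-01')
#   copy = Event.from_config(event.config)
#   assert copy.id == event.id and copy.data == {'state': 'on'}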
| {
"content_hash": "648d7c3e01293490c9e912154f00ec05",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 30.126050420168067,
"alnum_prop": 0.5364016736401673,
"repo_name": "SKA-ScienceDataProcessor/integration-prototype",
"id": "df97fda71f24d8975af5c38ea1b2c09131fe2cfb",
"size": "3600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sip/execution_control/configuration_db/sip_config_db/_events/event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "45998"
},
{
"name": "C++",
"bytes": "1384907"
},
{
"name": "CMake",
"bytes": "38961"
},
{
"name": "Dockerfile",
"bytes": "8629"
},
{
"name": "Python",
"bytes": "444510"
},
{
"name": "SQLPL",
"bytes": "32667"
},
{
"name": "Shell",
"bytes": "28774"
},
{
"name": "TSQL",
"bytes": "18857"
}
],
"symlink_target": ""
} |
"""Hypergeometric and Meijer G-functions"""
from sympy.core import S, I, pi, oo, ilcm, Mod
from sympy.core.function import Function, Derivative, ArgumentIndexError
from sympy.core.containers import Tuple
from sympy.core.mul import Mul
from sympy.functions import (sqrt, exp, log, sin, cos, asin, atan,
sinh, cosh, asinh, acosh, atanh, acoth)
# TODO should __new__ accept **options?
# TODO should constructors check if parameters are sensible?
def _prep_tuple(v):
"""
Turn an iterable argument V into a Tuple and unpolarify, since both
hypergeometric and meijer g-functions are unbranched in their parameters.
Examples:
>>> from sympy.functions.special.hyper import _prep_tuple
>>> _prep_tuple([1, 2, 3])
(1, 2, 3)
>>> _prep_tuple((4, 5))
(4, 5)
>>> _prep_tuple((7, 8, 9))
(7, 8, 9)
"""
from sympy.simplify.simplify import unpolarify
return Tuple(*[unpolarify(x) for x in v])
class TupleParametersBase(Function):
""" Base class that takes care of differentiation, when some of
the arguments are actually tuples. """
# This is not deduced automatically since there are Tuples as arguments.
is_commutative = True
def _eval_derivative(self, s):
try:
res = 0
if self.args[0].has(s) or self.args[1].has(s):
for i, p in enumerate(self._diffargs):
m = self._diffargs[i].diff(s)
if m != 0:
res += self.fdiff((1, i))*m
return res + self.fdiff(3)*self.args[2].diff(s)
except (ArgumentIndexError, NotImplementedError):
return Derivative(self, s)
class hyper(TupleParametersBase):
r"""
The (generalized) hypergeometric function is defined by a series where
the ratios of successive terms are a rational function of the summation
index. When convergent, it is continued analytically to the largest
possible domain.
The hypergeometric function depends on two vectors of parameters, called
the numerator parameters :math:`a_p`, and the denominator parameters
:math:`b_q`. It also has an argument :math:`z`. The series definition is
.. math ::
{}_pF_q\left(\begin{matrix} a_1, \dots, a_p \\ b_1, \dots, b_q \end{matrix}
\middle| z \right)
= \sum_{n=0}^\infty \frac{(a_1)_n \dots (a_p)_n}{(b_1)_n \dots (b_q)_n}
\frac{z^n}{n!},
where :math:`(a)_n = (a)(a+1)\dots(a+n-1)` denotes the rising factorial.
If one of the :math:`b_q` is a non-positive integer then the series is
undefined unless one of the `a_p` is a larger (i.e. smaller in
magnitude) non-positive integer. If none of the :math:`b_q` is a
non-positive integer and one of the :math:`a_p` is a non-positive
integer, then the series reduces to a polynomial. To simplify the
following discussion, we assume that none of the :math:`a_p` or
:math:`b_q` is a non-positive integer. For more details, see the
references.
The series converges for all :math:`z` if :math:`p \le q`, and thus
defines an entire single-valued function in this case. If :math:`p =
q+1` the series converges for :math:`|z| < 1`, and can be continued
analytically into a half-plane. If :math:`p > q+1` the series is
divergent for all :math:`z`.
Note: The hypergeometric function constructor currently does *not* check
if the parameters actually yield a well-defined function.
Examples
========
The parameters :math:`a_p` and :math:`b_q` can be passed as arbitrary
iterables, for example:
>>> from sympy.functions import hyper
>>> from sympy.abc import x, n, a
>>> hyper((1, 2, 3), [3, 4], x)
hyper((1, 2, 3), (3, 4), x)
There is also pretty printing (it looks better using unicode):
>>> from sympy import pprint
>>> pprint(hyper((1, 2, 3), [3, 4], x), use_unicode=False)
_
|_ /1, 2, 3 | \
| | | x|
3 2 \ 3, 4 | /
The parameters must always be iterables, even if they are vectors of
length one or zero:
>>> hyper((1, ), [], x)
hyper((1,), (), x)
But of course they may be variables (but if they depend on x then you
should not expect much implemented functionality):
>>> hyper((n, a), (n**2,), x)
hyper((n, a), (n**2,), x)
The hypergeometric function generalizes many named special functions.
The function hyperexpand() tries to express a hypergeometric function
using named special functions.
For example:
>>> from sympy import hyperexpand
>>> hyperexpand(hyper([], [], x))
exp(x)
You can also use expand_func:
>>> from sympy import expand_func
>>> expand_func(x*hyper([1, 1], [2], -x))
log(x + 1)
More examples:
>>> from sympy import S
>>> hyperexpand(hyper([], [S(1)/2], -x**2/4))
cos(x)
>>> hyperexpand(x*hyper([S(1)/2, S(1)/2], [S(3)/2], x**2))
asin(x)
We can also sometimes hyperexpand parametric functions:
>>> from sympy.abc import a
>>> hyperexpand(hyper([-a], [], x))
(-x + 1)**a
See Also
========
sympy.simplify.hyperexpand
sympy.functions.special.gamma_functions.gamma
meijerg
References
==========
- Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
- http://en.wikipedia.org/wiki/Generalized_hypergeometric_function
"""
nargs = 3
def __new__(cls, ap, bq, z):
# TODO should we check convergence conditions?
return Function.__new__(cls, _prep_tuple(ap), _prep_tuple(bq), z)
@classmethod
def eval(cls, ap, bq, z):
from sympy import unpolarify
if len(ap) <= len(bq):
nz = unpolarify(z)
if z != nz:
return hyper(ap, bq, nz)
def fdiff(self, argindex=3):
if argindex != 3:
raise ArgumentIndexError(self, argindex)
nap = Tuple(*[a + 1 for a in self.ap])
nbq = Tuple(*[b + 1 for b in self.bq])
fac = Mul(*self.ap)/Mul(*self.bq)
return fac*hyper(nap, nbq, self.argument)
def _eval_expand_func(self, **hints):
from sympy import gamma, hyperexpand
if len(self.ap) == 2 and len(self.bq) == 1 and self.argument == 1:
a, b = self.ap
c = self.bq[0]
return gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
return hyperexpand(self)
@property
def argument(self):
""" Argument of the hypergeometric function. """
return self.args[2]
@property
def ap(self):
""" Numerator parameters of the hypergeometric function. """
return self.args[0]
@property
def bq(self):
""" Denominator parameters of the hypergeometric function. """
return self.args[1]
@property
def _diffargs(self):
return self.ap + self.bq
@property
def eta(self):
""" A quantity related to the convergence of the series. """
return sum(self.ap) - sum(self.bq)
@property
def radius_of_convergence(self):
"""
Compute the radius of convergence of the defining series.
Note that even if this is not oo, the function may still be evaluated
outside of the radius of convergence by analytic continuation. But if
this is zero, then the function is not actually defined anywhere else.
>>> from sympy.functions import hyper
>>> from sympy.abc import z
>>> hyper((1, 2), [3], z).radius_of_convergence
1
>>> hyper((1, 2, 3), [4], z).radius_of_convergence
0
>>> hyper((1, 2), (3, 4), z).radius_of_convergence
oo
"""
if any(a.is_integer and a <= 0 for a in self.ap + self.bq):
aints = [a for a in self.ap if a.is_Integer and a <= 0]
bints = [a for a in self.bq if a.is_Integer and a <= 0]
if len(aints) < len(bints):
return S(0)
popped = False
for b in bints:
cancelled = False
while aints:
a = aints.pop()
if a >= b:
cancelled = True
break
popped = True
if not cancelled:
return S(0)
if aints or popped:
# There are still non-positive numerator parameters.
# This is a polynomial.
return oo
if len(self.ap) == len(self.bq) + 1:
return S(1)
elif len(self.ap) <= len(self.bq):
return oo
else:
return S(0)
@property
def convergence_statement(self):
""" Return a condition on z under which the series converges. """
from sympy import And, Or, re, Ne, oo
R = self.radius_of_convergence
if R == 0:
return False
if R == oo:
return True
# The special functions and their approximations, page 44
e = self.eta
z = self.argument
c1 = And(re(e) < 0, abs(z) <= 1)
c2 = And(0 <= re(e), re(e) < 1, abs(z) <= 1, Ne(z, 1))
c3 = And(re(e) >= 1, abs(z) < 1)
return Or(c1, c2, c3)
class meijerg(TupleParametersBase):
r"""
The Meijer G-function is defined by a Mellin-Barnes type integral that
resembles an inverse Mellin transform. It generalizes the hypergeometric
functions.
The Meijer G-function depends on four sets of parameters. There are
"*numerator parameters*"
:math:`a_1, \dots, a_n` and :math:`a_{n+1}, \dots, a_p`, and there are
"*denominator parameters*"
:math:`b_1, \dots, b_m` and :math:`b_{m+1}, \dots, b_q`.
Confusingly, it is traditionally denoted as follows (note the position
of `m`, `n`, `p`, `q`, and how they relate to the lengths of the four
parameter vectors):
.. math ::
G_{p,q}^{m,n} \left(\begin{matrix}a_1, \dots, a_n & a_{n+1}, \dots, a_p \\
b_1, \dots, b_m & b_{m+1}, \dots, b_q
\end{matrix} \middle| z \right).
However, in sympy the four parameter vectors are always available
separately (see examples), so that there is no need to keep track of the
decorating sub- and super-scripts on the G symbol.
The G function is defined as the following integral:
.. math ::
\frac{1}{2 \pi i} \int_L \frac{\prod_{j=1}^m \Gamma(b_j - s)
\prod_{j=1}^n \Gamma(1 - a_j + s)}{\prod_{j=m+1}^q \Gamma(1- b_j +s)
\prod_{j=n+1}^p \Gamma(a_j - s)} z^s \mathrm{d}s,
where :math:`\Gamma(z)` is the gamma function. There are three possible
contours which we will not describe in detail here (see the references).
If the integral converges along more than one of them the definitions
agree. The contours all separate the poles of :math:`\Gamma(1-a_j+s)`
from the poles of :math:`\Gamma(b_k-s)`, so in particular the G function
is undefined if :math:`a_j - b_k \in \mathbb{Z}_{>0}` for some
:math:`j \le n` and :math:`k \le m`.
The conditions under which one of the contours yields a convergent integral
are complicated and we do not state them here, see the references.
Note: Currently the Meijer G-function constructor does *not* check any
convergence conditions.
Examples
========
You can pass the parameters either as four separate vectors:
>>> from sympy.functions import meijerg
>>> from sympy.abc import x, a
>>> from sympy.core.containers import Tuple
>>> from sympy import pprint
>>> pprint(meijerg((1, 2), (a, 4), (5,), [], x), use_unicode=False)
__1, 2 /1, 2 a, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
or as two nested vectors:
>>> pprint(meijerg([(1, 2), (3, 4)], ([5], Tuple()), x), use_unicode=False)
__1, 2 /1, 2 3, 4 | \
/__ | | x|
\_|4, 1 \ 5 | /
As with the hypergeometric function, the parameters may be passed as
arbitrary iterables. Vectors of length zero and one also have to be
passed as iterables. The parameters need not be constants, but if they
depend on the argument then not much implemented functionality should be
expected.
All the subvectors of parameters are available:
>>> from sympy import pprint
>>> g = meijerg([1], [2], [3], [4], x)
>>> pprint(g, use_unicode=False)
__1, 1 /1 2 | \
/__ | | x|
\_|2, 2 \3 4 | /
>>> g.an
(1,)
>>> g.ap
(1, 2)
>>> g.aother
(2,)
>>> g.bm
(3,)
>>> g.bq
(3, 4)
>>> g.bother
(4,)
The Meijer G-function generalizes the hypergeometric functions.
In some cases it can be expressed in terms of hypergeometric functions,
using Slater's theorem. For example:
>>> from sympy import hyperexpand
>>> from sympy.abc import a, b, c
>>> hyperexpand(meijerg([a], [], [c], [b], x), allow_hyper=True)
x**c*gamma(-a + c + 1)*hyper((-a + c + 1,),
(-b + c + 1,), -x)/gamma(-b + c + 1)
Thus the Meijer G-function also subsumes many named functions as special
cases. You can use expand_func or hyperexpand to (try to) rewrite a
Meijer G-function in terms of named special functions. For example:
>>> from sympy import expand_func, S
>>> expand_func(meijerg([[],[]], [[0],[]], -x))
exp(x)
>>> hyperexpand(meijerg([[],[]], [[S(1)/2],[0]], (x/2)**2))
sin(x)/sqrt(pi)
See Also
========
hyper
sympy.simplify.hyperexpand
References
==========
- Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
- http://en.wikipedia.org/wiki/Meijer_G-function
"""
nargs = 3
def __new__(cls, *args):
if len(args) == 5:
args = [(args[0], args[1]), (args[2], args[3]), args[4]]
if len(args) != 3:
raise TypeError("args must eiter be as, as', bs, bs', z or "
"as, bs, z")
def tr(p):
if len(p) != 2:
raise TypeError("wrong argument")
return Tuple(_prep_tuple(p[0]), _prep_tuple(p[1]))
# TODO should we check convergence conditions?
return Function.__new__(cls, tr(args[0]), tr(args[1]), args[2])
def fdiff(self, argindex=3):
if argindex != 3:
return self._diff_wrt_parameter(argindex[1])
if len(self.an) >= 1:
a = list(self.an)
a[0] -= 1
G = meijerg(a, self.aother, self.bm, self.bother, self.argument)
return 1/self.argument * ((self.an[0] - 1)*self + G)
elif len(self.bm) >= 1:
b = list(self.bm)
b[0] += 1
G = meijerg(self.an, self.aother, b, self.bother, self.argument)
return 1/self.argument * (self.bm[0]*self - G)
else:
return S.Zero
def _diff_wrt_parameter(self, idx):
# Differentiation wrt a parameter can only be done in very special
# cases. In particular, if we want to differentiate with respect to
# `a`, all other gamma factors have to reduce to rational functions.
#
# Let MT denote mellin transform. Suppose T(-s) is the gamma factor
# appearing in the definition of G. Then
#
# MT(log(z)G(z)) = d/ds T(s) = d/da T(s) + ...
#
# Thus d/da G(z) = log(z)G(z) - ...
# The ... can be evaluated as a G function under the above conditions,
# the formula being most easily derived by using
#
# d Gamma(s + n) Gamma(s + n) / 1 1 1 \
# -- ------------ = ------------ | - + ---- + ... + --------- |
# ds Gamma(s) Gamma(s) \ s s + 1 s + n - 1 /
#
# which follows from the difference equation of the digamma function.
# (There is a similar equation for -n instead of +n).
# We first figure out how to pair the parameters.
an = list(self.an)
ap = list(self.aother)
bm = list(self.bm)
bq = list(self.bother)
if idx < len(an):
an.pop(idx)
else:
idx -= len(an)
if idx < len(ap):
ap.pop(idx)
else:
idx -= len(ap)
if idx < len(bm):
bm.pop(idx)
else:
bq.pop(idx - len(bm))
pairs1 = []
pairs2 = []
for l1, l2, pairs in [(an, bq, pairs1), (ap, bm, pairs2)]:
while l1:
x = l1.pop()
found = None
for i, y in enumerate(l2):
if not Mod((x - y).simplify(), 1):
found = i
break
if found is None:
raise NotImplementedError('Derivative not expressible '
'as G-function?')
y = l2[i]
l2.pop(i)
pairs.append((x, y))
# Now build the result.
res = log(self.argument)*self
for a, b in pairs1:
sign = 1
n = a - b
base = b
if n < 0:
sign = -1
n = b - a
base = a
for k in range(n):
res -= sign*meijerg(self.an + (base + k + 1,), self.aother,
self.bm, self.bother + (base + k + 0,),
self.argument)
for a, b in pairs2:
sign = 1
n = b - a
base = a
if n < 0:
sign = -1
n = a - b
base = b
for k in range(n):
res -= sign*meijerg(self.an, self.aother + (base + k + 1,),
self.bm + (base + k + 0,), self.bother,
self.argument)
return res
def get_period(self):
"""
Return a number P such that G(x*exp(I*P)) == G(x).
>>> from sympy.functions.special.hyper import meijerg
>>> from sympy.abc import z
>>> from sympy import pi, S
>>> meijerg([1], [], [], [], z).get_period()
2*pi
>>> meijerg([pi], [], [], [], z).get_period()
oo
>>> meijerg([1, 2], [], [], [], z).get_period()
oo
>>> meijerg([1,1], [2], [1, S(1)/2, S(1)/3], [1], z).get_period()
12*pi
"""
# This follows from slater's theorem.
def compute(l):
# first check that no two differ by an integer
for i, b in enumerate(l):
if not b.is_Rational:
return oo
for j in range(i + 1, len(l)):
if not Mod((b - l[j]).simplify(), 1):
return oo
return reduce(ilcm, (x.q for x in l), 1)
beta = compute(self.bm)
alpha = compute(self.an)
p, q = len(self.ap), len(self.bq)
if p == q:
if beta == oo or alpha == oo:
return oo
return 2*pi*ilcm(alpha, beta)
elif p < q:
return 2*pi*beta
else:
return 2*pi*alpha
def _eval_expand_func(self, **hints):
from sympy import hyperexpand
return hyperexpand(self)
def _eval_evalf(self, prec):
# The default code is insufficient for polar arguments.
# mpmath provides an optional argument "r", which evaluates
# G(z**(1/r)). I am not sure what its intended use is, but we hijack it
# here in the following way: to evaluate at a number z of |argument|
# less than (say) n*pi, we put r=1/n, compute z' = root(z, n)
# (carefully so as not to lose the branch information), and evaluate
# G(z'**(1/r)) = G(z'**n) = G(z).
from sympy.functions import exp_polar, ceiling
from sympy import mpmath, Expr
z = self.argument
znum = self.argument._eval_evalf(prec)
if znum.has(exp_polar):
znum, branch = znum.as_coeff_mul(exp_polar)
if len(branch) != 1:
return
branch = branch[0].args[0]/I
else:
branch = S(0)
n = ceiling(abs(branch/S.Pi)) + 1
znum = znum**(S(1)/n)*exp(I*branch / n)
#print znum, branch, n
# Convert all args to mpf or mpc
try:
[z, r, ap, bq] = [arg._to_mpmath(prec)
for arg in [znum, 1/n, self.args[0], self.args[1]]]
except ValueError:
return
# Set mpmath precision and apply. Make sure precision is restored
# afterwards
orig = mpmath.mp.prec
try:
mpmath.mp.prec = prec
v = mpmath.meijerg(ap, bq, z, r)
#print ap, bq, z, r, v
finally:
mpmath.mp.prec = orig
return Expr._from_mpmath(v, prec)
def integrand(self, s):
""" Get the defining integrand D(s). """
from sympy import gamma
return self.argument**s \
* Mul(*(gamma(b - s) for b in self.bm)) \
* Mul(*(gamma(1 - a + s) for a in self.an)) \
/ Mul(*(gamma(1 - b + s) for b in self.bother)) \
/ Mul(*(gamma(a - s) for a in self.aother))
@property
def argument(self):
""" Argument of the Meijer G-function. """
return self.args[2]
@property
def an(self):
""" First set of numerator parameters. """
return self.args[0][0]
@property
def ap(self):
""" Combined numerator parameters. """
return self.args[0][0] + self.args[0][1]
@property
def aother(self):
""" Second set of numerator parameters. """
return self.args[0][1]
@property
def bm(self):
""" First set of denominator parameters. """
return self.args[1][0]
@property
def bq(self):
""" Combined denominator parameters. """
return self.args[1][0] + self.args[1][1]
@property
def bother(self):
""" Second set of denominator parameters. """
return self.args[1][1]
@property
def _diffargs(self):
return self.ap + self.bq
@property
def nu(self):
""" A quantity related to the convergence region of the integral,
c.f. references. """
return sum(self.bq) - sum(self.ap)
@property
def delta(self):
""" A quantity related to the convergence region of the integral,
c.f. references. """
return len(self.bm) + len(self.an) - S(len(self.ap) + len(self.bq))/2
class HyperRep(Function):
"""
A base class for "hyper representation functions".
This is used exclusively in hyperexpand(), but fits more logically here.
pFq is branched at 1 if p == q+1. For use with slater-expansion, we want
define an "analytic continuation" to all polar numbers, which is
continuous on circles and on the ray t*exp_polar(I*pi). Moreover, we want
a "nice" expression for the various cases.
This base class contains the core logic, concrete derived classes only
supply the actual functions.
"""
nargs = 1
@classmethod
def eval(cls, *args):
from sympy import unpolarify
nargs = tuple(map(unpolarify, args[:-1])) + args[-1:]
if args != nargs:
return cls(*nargs)
@classmethod
def _expr_small(cls, x):
""" An expression for F(x) which holds for |x| < 1. """
raise NotImplementedError
@classmethod
def _expr_small_minus(cls, x):
""" An expression for F(-x) which holds for |x| < 1. """
raise NotImplementedError
@classmethod
def _expr_big(cls, x, n):
""" An expression for F(exp_polar(2*I*pi*n)*x), |x| > 1. """
raise NotImplementedError
@classmethod
def _expr_big_minus(cls, x, n):
""" An expression for F(exp_polar(2*I*pi*n + pi*I)*x), |x| > 1. """
raise NotImplementedError
def _eval_rewrite_as_nonrep(self, *args):
from sympy import Piecewise
x, n = self.args[-1].extract_branch_factor(allow_half=True)
minus = False
nargs = self.args[:-1] + (x,)
if not n.is_Integer:
minus = True
n -= S(1)/2
nnargs = nargs + (n,)
if minus:
small = self._expr_small_minus(*nargs)
big = self._expr_big_minus(*nnargs)
else:
small = self._expr_small(*nargs)
big = self._expr_big(*nnargs)
if big == small:
return small
return Piecewise((big, abs(x) > 1), (small, True))
def _eval_rewrite_as_nonrepsmall(self, *args):
x, n = self.args[-1].extract_branch_factor(allow_half=True)
args = self.args[:-1] + (x,)
if not n.is_Integer:
return self._expr_small_minus(*args)
return self._expr_small(*args)
class HyperRep_power1(HyperRep):
""" Return a representative for hyper([-a], [], z) == (1 - z)**a. """
nargs = 2
@classmethod
def _expr_small(cls, a, x):
return (1 - x)**a
@classmethod
def _expr_small_minus(cls, a, x):
return (1 + x)**a
@classmethod
def _expr_big(cls, a, x, n):
if a.is_integer:
return cls._expr_small(a, x)
return (x - 1)**a*exp((2*n - 1)*pi*I*a)
@classmethod
def _expr_big_minus(cls, a, x, n):
if a.is_integer:
return cls._expr_small_minus(a, x)
return (1 + x)**a*exp(2*n*pi*I*a)
class HyperRep_power2(HyperRep):
""" Return a representative for hyper([a, a - 1/2], [2*a], z). """
nargs = 2
@classmethod
def _expr_small(cls, a, x):
return 2**(2*a - 1)*(1 + sqrt(1 - x))**(1 - 2*a)
@classmethod
def _expr_small_minus(cls, a, x):
return 2**(2*a - 1)*(1 + sqrt(1 + x))**(1 - 2*a)
@classmethod
def _expr_big(cls, a, x, n):
sgn = -1
if n.is_odd:
sgn = 1
n -= 1
return 2**(2*a - 1)*(1 + sgn*I*sqrt(x - 1))**(1 - 2*a) \
*exp(-2*n*pi*I*a)
@classmethod
def _expr_big_minus(cls, a, x, n):
sgn = 1
if n.is_odd:
sgn = -1
return sgn*2**(2*a - 1)*(sqrt(1 + x) + sgn)**(1 - 2*a)*exp(-2*pi*I*a*n)
class HyperRep_log1(HyperRep):
""" Represent -z*hyper([1, 1], [2], z) == log(1 - z). """
@classmethod
def _expr_small(cls, x):
return log(1 - x)
@classmethod
def _expr_small_minus(cls, x):
return log(1 + x)
@classmethod
def _expr_big(cls, x, n):
return log(x - 1) + (2*n - 1)*pi*I
@classmethod
def _expr_big_minus(cls, x, n):
return log(1 + x) + 2*n*pi*I
class HyperRep_atanh(HyperRep):
""" Represent hyper([1/2, 1], [3/2], z) == atanh(sqrt(z))/sqrt(z). """
@classmethod
def _expr_small(cls, x):
return atanh(sqrt(x))/sqrt(x)
def _expr_small_minus(cls, x):
return atan(sqrt(x))/sqrt(x)
def _expr_big(cls, x, n):
if n.is_even:
return (acoth(sqrt(x)) + I*pi/2)/sqrt(x)
else:
return (acoth(sqrt(x)) - I*pi/2)/sqrt(x)
def _expr_big_minus(cls, x, n):
if n.is_even:
return atan(sqrt(x))/sqrt(x)
else:
return (atan(sqrt(x)) - pi)/sqrt(x)
class HyperRep_asin1(HyperRep):
""" Represent hyper([1/2, 1/2], [3/2], z) == asin(sqrt(z))/sqrt(z). """
@classmethod
def _expr_small(cls, z):
return asin(sqrt(z))/sqrt(z)
@classmethod
def _expr_small_minus(cls, z):
return asinh(sqrt(z))/sqrt(z)
@classmethod
def _expr_big(cls, z, n):
return S(-1)**n*((S(1)/2 - n)*pi/sqrt(z) + I*acosh(sqrt(z))/sqrt(z))
@classmethod
def _expr_big_minus(cls, z, n):
return S(-1)**n*(asinh(sqrt(z))/sqrt(z) + n*pi*I/sqrt(z))
class HyperRep_asin2(HyperRep):
""" Represent hyper([1, 1], [3/2], z) == asin(sqrt(z))/sqrt(z)/sqrt(1-z). """
# TODO this can be nicer
@classmethod
def _expr_small(cls, z):
return HyperRep_asin1._expr_small(z) \
/HyperRep_power1._expr_small(S(1)/2, z)
@classmethod
def _expr_small_minus(cls, z):
return HyperRep_asin1._expr_small_minus(z) \
/HyperRep_power1._expr_small_minus(S(1)/2, z)
@classmethod
def _expr_big(cls, z, n):
return HyperRep_asin1._expr_big(z, n) \
/HyperRep_power1._expr_big(S(1)/2, z, n)
@classmethod
def _expr_big_minus(cls, z, n):
return HyperRep_asin1._expr_big_minus(z, n) \
/HyperRep_power1._expr_big_minus(S(1)/2, z, n)
class HyperRep_sqrts1(HyperRep):
""" Return a representative for hyper([-a, 1/2 - a], [1/2], z). """
nargs = 2
@classmethod
def _expr_small(cls, a, z):
return ((1 - sqrt(z))**(2*a) + (1 + sqrt(z))**(2*a))/2
@classmethod
def _expr_small_minus(cls, a, z):
return (1 + z)**a*cos(2*a*atan(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
if n.is_even:
return ((sqrt(z) + 1)**(2*a)*exp(2*pi*I*n*a) +
(sqrt(z) - 1)**(2*a)*exp(2*pi*I*(n - 1)*a))/2
else:
n -= 1
return ((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n + 1)) +
(sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))/2
@classmethod
def _expr_big_minus(cls, a, z, n):
if n.is_even:
return (1 + z)**a*exp(2*pi*I*n*a)*cos(2*a*atan(sqrt(z)))
else:
return (1 + z)**a*exp(2*pi*I*n*a)*cos(2*a*atan(sqrt(z)) - 2*pi*a)
class HyperRep_sqrts2(HyperRep):
""" Return a representative for
          sqrt(z)/2*[(1 - sqrt(z))**(2*a) - (1 + sqrt(z))**(2*a)]
          == -2*z/(2*a + 1) d/dz hyper([-a - 1/2, -a], [1/2], z)"""
nargs = 2
@classmethod
def _expr_small(cls, a, z):
return sqrt(z)*((1 - sqrt(z))**(2*a) - (1 + sqrt(z))**(2*a))/2
@classmethod
def _expr_small_minus(cls, a, z):
return sqrt(z)*(1 + z)**a*sin(2*a*atan(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
if n.is_even:
return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n - 1)) -
(sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))
else:
n -= 1
return sqrt(z)/2*((sqrt(z) - 1)**(2*a)*exp(2*pi*I*a*(n + 1)) -
(sqrt(z) + 1)**(2*a)*exp(2*pi*I*a*n))
    @classmethod
    def _expr_big_minus(cls, a, z, n):
if n.is_even:
return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z)*sin(2*a*atan(sqrt(z)))
else:
return (1 + z)**a*exp(2*pi*I*n*a)*sqrt(z) \
*sin(2*a*atan(sqrt(z)) - 2*pi*a)
class HyperRep_log2(HyperRep):
""" Represent log(1/2 + sqrt(1 - z)/2) == -z/4*hyper([3/2, 1, 1], [2, 2], z) """
@classmethod
def _expr_small(cls, z):
return log(S(1)/2 + sqrt(1 - z)/2)
@classmethod
def _expr_small_minus(cls, z):
return log(S(1)/2 + sqrt(1 + z)/2)
@classmethod
def _expr_big(cls, z, n):
if n.is_even:
return (n - S(1)/2)*pi*I + log(sqrt(z)/2) + I*asin(1/sqrt(z))
else:
return (n - S(1)/2)*pi*I + log(sqrt(z)/2) - I*asin(1/sqrt(z))
    @classmethod
    def _expr_big_minus(cls, z, n):
if n.is_even:
return pi*I*n + log(S(1)/2 + sqrt(1 + z)/2)
else:
return pi*I*n + log(sqrt(1 + z)/2 - S(1)/2)
class HyperRep_cosasin(HyperRep):
""" Represent hyper([a, -a], [1/2], z) == cos(2*a*asin(sqrt(z))). """
# Note there are many alternative expressions, e.g. as powers of a sum of
# square roots.
nargs = 2
@classmethod
def _expr_small(cls, a, z):
return cos(2*a*asin(sqrt(z)))
@classmethod
def _expr_small_minus(cls, a, z):
return cosh(2*a*asinh(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
return cosh(2*a*acosh(sqrt(z)) + a*pi*I*(2*n - 1))
@classmethod
def _expr_big_minus(cls, a, z, n):
return cosh(2*a*asinh(sqrt(z)) + 2*a*pi*I*n)
class HyperRep_sinasin(HyperRep):
""" Represent 2*a*z*hyper([1 - a, 1 + a], [3/2], z)
== sqrt(z)/sqrt(1-z)*sin(2*a*asin(sqrt(z))) """
nargs = 2
@classmethod
def _expr_small(cls, a, z):
return sqrt(z)/sqrt(1 - z)*sin(2*a*asin(sqrt(z)))
@classmethod
def _expr_small_minus(cls, a, z):
return -sqrt(z)/sqrt(1 + z)*sinh(2*a*asinh(sqrt(z)))
@classmethod
def _expr_big(cls, a, z, n):
return -1/sqrt(1 - 1/z)*sinh(2*a*acosh(sqrt(z)) + a*pi*I*(2*n - 1))
@classmethod
def _expr_big_minus(cls, a, z, n):
return -1/sqrt(1 + 1/z)*sinh(2*a*asinh(sqrt(z)) + 2*a*pi*I*n)
| {
"content_hash": "d11f38decf0e78eaff64b6ab3ef17d10",
"timestamp": "",
"source": "github",
"line_count": 1014,
"max_line_length": 84,
"avg_line_length": 32.3698224852071,
"alnum_prop": 0.526673369283734,
"repo_name": "amitjamadagni/sympy",
"id": "9bab7d8deb52c11b72a79a61a5962c3bb0cf1954",
"size": "32823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/functions/special/hyper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12199014"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "287"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1PolicyRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'verbs': 'list[str]',
'attribute_restrictions': 'str',
'api_groups': 'list[str]',
'resources': 'list[str]',
'resource_names': 'list[str]',
'non_resource_ur_ls': 'list[str]'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'verbs': 'verbs',
'attribute_restrictions': 'attributeRestrictions',
'api_groups': 'apiGroups',
'resources': 'resources',
'resource_names': 'resourceNames',
'non_resource_ur_ls': 'nonResourceURLs'
}
def __init__(self, verbs=None, attribute_restrictions=None, api_groups=None, resources=None, resource_names=None, non_resource_ur_ls=None):
"""
V1PolicyRule - a model defined in Swagger
"""
self._verbs = verbs
self._attribute_restrictions = attribute_restrictions
self._api_groups = api_groups
self._resources = resources
self._resource_names = resource_names
self._non_resource_ur_ls = non_resource_ur_ls
@property
def verbs(self):
"""
Gets the verbs of this V1PolicyRule.
Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
:return: The verbs of this V1PolicyRule.
:rtype: list[str]
"""
return self._verbs
@verbs.setter
def verbs(self, verbs):
"""
Sets the verbs of this V1PolicyRule.
Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
:param verbs: The verbs of this V1PolicyRule.
:type: list[str]
"""
self._verbs = verbs
@property
def attribute_restrictions(self):
"""
Gets the attribute_restrictions of this V1PolicyRule.
AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
:return: The attribute_restrictions of this V1PolicyRule.
:rtype: str
"""
return self._attribute_restrictions
@attribute_restrictions.setter
def attribute_restrictions(self, attribute_restrictions):
"""
Sets the attribute_restrictions of this V1PolicyRule.
AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.
:param attribute_restrictions: The attribute_restrictions of this V1PolicyRule.
:type: str
"""
self._attribute_restrictions = attribute_restrictions
@property
def api_groups(self):
"""
Gets the api_groups of this V1PolicyRule.
APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed
:return: The api_groups of this V1PolicyRule.
:rtype: list[str]
"""
return self._api_groups
@api_groups.setter
def api_groups(self, api_groups):
"""
Sets the api_groups of this V1PolicyRule.
APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed
:param api_groups: The api_groups of this V1PolicyRule.
:type: list[str]
"""
self._api_groups = api_groups
@property
def resources(self):
"""
Gets the resources of this V1PolicyRule.
Resources is a list of resources this rule applies to. ResourceAll represents all resources.
:return: The resources of this V1PolicyRule.
:rtype: list[str]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""
Sets the resources of this V1PolicyRule.
Resources is a list of resources this rule applies to. ResourceAll represents all resources.
:param resources: The resources of this V1PolicyRule.
:type: list[str]
"""
self._resources = resources
@property
def resource_names(self):
"""
Gets the resource_names of this V1PolicyRule.
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
:return: The resource_names of this V1PolicyRule.
:rtype: list[str]
"""
return self._resource_names
@resource_names.setter
def resource_names(self, resource_names):
"""
Sets the resource_names of this V1PolicyRule.
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
:param resource_names: The resource_names of this V1PolicyRule.
:type: list[str]
"""
self._resource_names = resource_names
@property
def non_resource_ur_ls(self):
"""
Gets the non_resource_ur_ls of this V1PolicyRule.
        NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. This name is intentionally different from the internal type so that the DefaultConvert works nicely and because the ordering may be different.
:return: The non_resource_ur_ls of this V1PolicyRule.
:rtype: list[str]
"""
return self._non_resource_ur_ls
@non_resource_ur_ls.setter
def non_resource_ur_ls(self, non_resource_ur_ls):
"""
Sets the non_resource_ur_ls of this V1PolicyRule.
        NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. This name is intentionally different from the internal type so that the DefaultConvert works nicely and because the ordering may be different.
:param non_resource_ur_ls: The non_resource_ur_ls of this V1PolicyRule.
:type: list[str]
"""
self._non_resource_ur_ls = non_resource_ur_ls
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1PolicyRule.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
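# Illustrative usage sketch, not part of the generated client; the verbs and
# resources below are hypothetical example values:
_example_rule = V1PolicyRule(verbs=['get', 'list'], api_groups=[''], resources=['pods'])
assert _example_rule.verbs == ['get', 'list']             # read through the property getter
assert _example_rule.to_dict()['resources'] == ['pods']   # to_dict() keys are the python attribute names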
| {
"content_hash": "6408edc596dacf23fe33d9f71fb96d15",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 315,
"avg_line_length": 35.251937984496124,
"alnum_prop": 0.634524463991204,
"repo_name": "detiber/lib_openshift",
"id": "2862a6f860e2c72501b4a6b01bfd6ef79fb207eb",
"size": "9112",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib_openshift/models/v1_policy_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61305"
},
{
"name": "Python",
"bytes": "6202851"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
} |
from PyQt5 import Qt
from ..shared_resources import UNIQUE_QGRAPHICSITEM_TYPE
from .simple_point_picker import SimplePointPicker
class SimplePolyLinePointPicker(SimplePointPicker):
QGRAPHICSITEM_TYPE = UNIQUE_QGRAPHICSITEM_TYPE()
def __init__(self, general_view, parent_item, points=None):
self.line_items = []
self.line_pen = Qt.QPen(Qt.Qt.green)
self.line_pen.setWidth(5)
self.line_pen.setCosmetic(True)
super().__init__(general_view, parent_item, points)
self.point_item_position_has_changed.connect(self._on_point_item_position_has_changed)
self.point_item_list_content_reset.connect(self._on_point_item_list_content_reset)
self._ignore_point_item_position_changed = False
def make_and_store_point_item(self, pos):
self._ignore_point_item_position_changed = True
try:
super().make_and_store_point_item(pos)
finally:
self._ignore_point_item_position_changed = False
if len(self.point_items) > 1:
p1 = self.point_items[-2].pos()
line_item = Qt.QGraphicsLineItem(Qt.QLineF(p1, pos), self.parentItem())
line_item.setPen(self.line_pen)
line_item.installSceneEventFilter(self)
line_item.setZValue(-1)
self.line_items.append(line_item)
def sceneEventFilter(self, watched, event):
is_line_click = (
isinstance(watched, Qt.QGraphicsLineItem) and
event.type() == Qt.QEvent.GraphicsSceneMousePress and
event.button() == Qt.Qt.LeftButton and
event.modifiers() == Qt.Qt.NoModifier
)
if is_line_click:
for point_item in self.point_items:
point_item.setSelected(True)
# Focus a point item so that the delete key shortcut works
self.point_items[0].setFocus()
return True
return super().sceneEventFilter(watched, event)
def _on_point_item_position_has_changed(self, point_item):
if not self._ignore_point_item_position_changed:
idx = self.point_items.index(point_item)
if idx > 0:
line_item = self.line_items[idx - 1]
line = line_item.line()
line.setP2(point_item.pos())
line_item.setLine(line)
if idx < len(self.point_items) - 1:
line_item = self.line_items[idx]
line = line_item.line()
line.setP1(point_item.pos())
line_item.setLine(line)
def _on_point_item_list_content_reset(self):
for line_item in self.line_items:
self.view.scene().removeItem(line_item)
self.line_items = []
if len(self.point_items) > 1:
for point_item1, point_item2 in zip(self.point_items, self.point_items[1:]):
line_item = Qt.QGraphicsLineItem(Qt.QLineF(point_item1.pos(), point_item2.pos()), self.parentItem())
line_item.setPen(self.line_pen)
line_item.installSceneEventFilter(self)
line_item.setZValue(-1)
self.line_items.append(line_item) | {
"content_hash": "3d9a7a37a1631fa7a96bd56b1cdbde28",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 116,
"avg_line_length": 44.66197183098591,
"alnum_prop": 0.6039104383475244,
"repo_name": "erikhvatum/RisWidget",
"id": "4326e219c39da27b4e4a6897374e608737dabb25",
"size": "4334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ris_widget/examples/simple_poly_line_point_picker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "276275"
},
{
"name": "GLSL",
"bytes": "7012"
},
{
"name": "Python",
"bytes": "600996"
},
{
"name": "QML",
"bytes": "2311"
}
],
"symlink_target": ""
} |
import copy
import optparse
import re
from pylint import utils
# pylint: disable=unused-argument
def _csv_validator(_, name, value):
return utils._check_csv(value)
# pylint: disable=unused-argument
def _regexp_validator(_, name, value):
if hasattr(value, "pattern"):
return value
return re.compile(value)
# pylint: disable=unused-argument
def _regexp_csv_validator(_, name, value):
return [_regexp_validator(_, name, val) for val in _csv_validator(_, name, value)]
def _choice_validator(choices, name, value):
if value not in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise optparse.OptionValueError(msg % (name, value, choices))
return value
def _yn_validator(opt, _, value):
if isinstance(value, int):
return bool(value)
if value in ("y", "yes"):
return True
if value in ("n", "no"):
return False
msg = "option %s: invalid yn value %r, should be in (y, yes, n, no)"
raise optparse.OptionValueError(msg % (opt, value))
def _multiple_choice_validator(choices, name, value):
values = utils._check_csv(value)
for csv_value in values:
if csv_value not in choices:
msg = "option %s: invalid value: %r, should be in %s"
raise optparse.OptionValueError(msg % (name, csv_value, choices))
return values
def _non_empty_string_validator(opt, _, value):
if not value:
msg = "indent string can't be empty."
raise optparse.OptionValueError(msg)
return utils._unquote(value)
def _multiple_choices_validating_option(opt, name, value):
return _multiple_choice_validator(opt.choices, name, value)
def _py_version_validator(_, name, value):
if not isinstance(value, tuple):
try:
value = tuple(int(val) for val in value.split("."))
except (ValueError, AttributeError):
raise optparse.OptionValueError(f"Invalid format for {name}") from None
return value
VALIDATORS = {
"string": utils._unquote,
"int": int,
"float": float,
"regexp": re.compile,
"regexp_csv": _regexp_csv_validator,
"csv": _csv_validator,
"yn": _yn_validator,
"choice": lambda opt, name, value: _choice_validator(opt["choices"], name, value),
"multiple_choice": lambda opt, name, value: _multiple_choice_validator(
opt["choices"], name, value
),
"non_empty_string": _non_empty_string_validator,
"py_version": _py_version_validator,
}
def _call_validator(opttype, optdict, option, value):
if opttype not in VALIDATORS:
raise Exception('Unsupported type "%s"' % opttype)
try:
return VALIDATORS[opttype](optdict, option, value)
except TypeError:
try:
return VALIDATORS[opttype](value)
except Exception as e:
raise optparse.OptionValueError(
f"{option} value ({value!r}) should be of type {opttype}"
) from e
def _validate(value, optdict, name=""):
"""return a validated value for an option according to its type
optional argument name is only used for error message formatting
"""
try:
_type = optdict["type"]
except KeyError:
return value
return _call_validator(_type, optdict, name, value)
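# Illustrative behaviour of the validators above (the option names are
# hypothetical; only the functions defined in this module are assumed):
#     _validate("yes", {"type": "yn"}, "strict")           -> True
#     _validate("3.8", {"type": "py_version"}, "min-ver")  -> (3, 8)
#     _validate("anything", {}, "untyped")                 -> "anything" (no "type" key, returned unchanged)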
# pylint: disable=no-member
class Option(optparse.Option):
TYPES = optparse.Option.TYPES + (
"regexp",
"regexp_csv",
"csv",
"yn",
"multiple_choice",
"non_empty_string",
"py_version",
)
ATTRS = optparse.Option.ATTRS + ["hide", "level"]
TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER["regexp"] = _regexp_validator
TYPE_CHECKER["regexp_csv"] = _regexp_csv_validator
TYPE_CHECKER["csv"] = _csv_validator
TYPE_CHECKER["yn"] = _yn_validator
TYPE_CHECKER["multiple_choice"] = _multiple_choices_validating_option
TYPE_CHECKER["non_empty_string"] = _non_empty_string_validator
TYPE_CHECKER["py_version"] = _py_version_validator
def __init__(self, *opts, **attrs):
optparse.Option.__init__(self, *opts, **attrs)
if hasattr(self, "hide") and self.hide:
self.help = optparse.SUPPRESS_HELP
def _check_choice(self):
if self.type in ("choice", "multiple_choice"):
if self.choices is None:
raise optparse.OptionError(
"must supply a list of choices for type 'choice'", self
)
if not isinstance(self.choices, (tuple, list)):
raise optparse.OptionError(
"choices must be a list of strings ('%s' supplied)"
% str(type(self.choices)).split("'")[1],
self,
)
elif self.choices is not None:
raise optparse.OptionError(
"must not supply choices for type %r" % self.type, self
)
# pylint: disable=unsupported-assignment-operation
optparse.Option.CHECK_METHODS[2] = _check_choice # type: ignore
def process(self, opt, value, values, parser):
# First, convert the value(s) to the right type. Howl if any
# value(s) are bogus.
value = self.convert_value(opt, value)
if self.type == "named":
existent = getattr(values, self.dest)
if existent:
existent.update(value)
value = existent
# And then take whatever action is expected of us.
# This is a separate method to make life easier for
# subclasses to add new actions.
return self.take_action(self.action, self.dest, opt, value, values, parser)
| {
"content_hash": "0e80fe4d79d7a90ded5f436f81694500",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 86,
"avg_line_length": 32.797687861271676,
"alnum_prop": 0.6129714487134297,
"repo_name": "ruchee/vimrc",
"id": "5994333ec62e0743f8d7d31b5a58e87a18a7fe0e",
"size": "5823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/pylint/config/option.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import pickle
class FileExistsError(Exception):
pass
def load_pickle(fp):
"""Returns python object from pickle at path fp"""
with open(fp, 'rb') as f:
return pickle.load(f)
def dump_pickle(pyobj, fp, overwrite=True):
"""Saves python object pyobj to path fp"""
if not overwrite:
import os.path
if os.path.isfile(fp):
raise FileExistsError(fp)
with open(fp, 'wb') as f:
pickle.dump(pyobj, f)
def load_json(fp):
"""Returns python object from json at path fp"""
with open(fp, 'r') as f:
return json.load(f)
def dump_json(pyobj, fp, overwrite=True, indent=None,
sort_keys=False):
"""Saves python object pyobj to relative path filename
Args:
pyobj: a dict, list, or combination thereof
fp: str, the filepath to save the json under
overwrite: boolean, whether to overwrite an existing file.
This does not pass silently.
indent: int, indent sent to the json.dumps() function
        sort_keys: boolean, whether to sort the keys in the output
Returns:
None
Raises:
        FileExistsError if overwrite=False and path exists
"""
if not overwrite:
import os.path
if os.path.isfile(fp):
raise FileExistsError(fp)
with open(fp, 'w+') as f:
f.write(json.dumps(pyobj, indent=indent, sort_keys=sort_keys))
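# Minimal round-trip sketch for the helpers above; the helper name and the
# temporary path are made up for illustration, only this module's functions
# are assumed:
def _example_roundtrip():
    import os
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'example.json')
    dump_json({'a': [1, 2, 3]}, path, indent=2, sort_keys=True)
    return load_json(path) == {'a': [1, 2, 3]}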
| {
"content_hash": "bde8acaf5a16998532b477db55df9c42",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 70,
"avg_line_length": 27.29824561403509,
"alnum_prop": 0.6323907455012854,
"repo_name": "Prooffreader/pyprooff",
"id": "f83384d74032c7ed116fe8324401061c3902f1d4",
"size": "1579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyprooff/inout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "56"
},
{
"name": "Python",
"bytes": "140423"
}
],
"symlink_target": ""
} |
import pytest
import pytest_trio.plugin # noqa
import trio
from pyee import TrioEventEmitter
class PyeeTestError(Exception):
pass
@pytest.mark.trio
async def test_trio_emit():
"""Test that the trio event emitter can handle wrapping
coroutines
"""
async with TrioEventEmitter() as ee:
should_call = trio.Event()
@ee.on("event")
async def event_handler():
should_call.set()
ee.emit("event")
result = False
with trio.move_on_after(0.1):
await should_call.wait()
result = True
assert result
@pytest.mark.trio
async def test_trio_once_emit():
"""Test that trio event emitters also wrap coroutines when
using once
"""
async with TrioEventEmitter() as ee:
should_call = trio.Event()
@ee.once("event")
async def event_handler():
should_call.set()
ee.emit("event")
result = False
with trio.move_on_after(0.1):
await should_call.wait()
result = True
assert result
@pytest.mark.trio
async def test_trio_error():
"""Test that trio event emitters can handle errors when
wrapping coroutines
"""
async with TrioEventEmitter() as ee:
send, rcv = trio.open_memory_channel(1)
@ee.on("event")
async def event_handler():
raise PyeeTestError()
@ee.on("error")
async def handle_error(exc):
async with send:
await send.send(exc)
ee.emit("event")
result = None
with trio.move_on_after(0.1):
async with rcv:
result = await rcv.__anext__()
assert isinstance(result, PyeeTestError)
@pytest.mark.trio
async def test_sync_error(event_loop):
"""Test that regular functions have the same error handling as coroutines"""
async with TrioEventEmitter() as ee:
send, rcv = trio.open_memory_channel(1)
@ee.on("event")
def sync_handler():
raise PyeeTestError()
@ee.on("error")
async def handle_error(exc):
async with send:
await send.send(exc)
ee.emit("event")
result = None
with trio.move_on_after(0.1):
async with rcv:
result = await rcv.__anext__()
assert isinstance(result, PyeeTestError)
| {
"content_hash": "dd5b603d794af22b97427f3c40b9338e",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 80,
"avg_line_length": 21.954545454545453,
"alnum_prop": 0.5722567287784679,
"repo_name": "jfhbrook/pyee",
"id": "387784917102d793d93b964d695956b211aaa4e7",
"size": "2440",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_trio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1610"
},
{
"name": "Python",
"bytes": "57915"
}
],
"symlink_target": ""
} |
descr = """A set of Python modules for functional MRI..."""
import sys
import os
from setuptools import setup, find_packages
def load_version():
"""Executes nistats/version.py in a globals dictionary and return it.
Note: importing nistats is not an option because there may be
dependencies like nibabel which are not installed and
setup.py is supposed to install them.
"""
# load all vars into globals, otherwise
# the later function call using global vars doesn't work.
globals_dict = {}
with open(os.path.join('nistats', 'version.py')) as fp:
exec(fp.read(), globals_dict)
return globals_dict
def is_installing():
# Allow command-lines such as "python setup.py build install"
install_commands = set(['install', 'develop'])
return install_commands.intersection(set(sys.argv))
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
_VERSION_GLOBALS = load_version()
DISTNAME = 'nistats'
DESCRIPTION = 'Modeling and Statistical analysis of fMRI data in Python'
LONG_DESCRIPTION = open('README.rst').read()
MAINTAINER = 'Bertrand Thirion'
MAINTAINER_EMAIL = 'bertrand.thirion@inria.fr'
URL = 'http://nistats.github.io'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://nistats.github.io'
VERSION = _VERSION_GLOBALS['__version__']
if __name__ == "__main__":
if is_installing():
module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
module_check_fn(is_nistats_installing=True)
install_requires = \
['%s>=%s' % (mod, meta['min_version'])
for mod, meta in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']
if not meta['required_at_installation']]
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False, # the package can run out of an .egg file
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
package_data={'nistats.tests': ['*.nii.gz', '*.npz'],
#'nistats.description': ['*.rst'],
},
install_requires=install_requires,)
| {
"content_hash": "baa5749a57bc284a3ba564a398a123fd",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 73,
"avg_line_length": 36.39080459770115,
"alnum_prop": 0.6064434617814277,
"repo_name": "MartinPerez/nistats",
"id": "2dac9f524a53e5dbe7325052170072955a98e598",
"size": "3190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1326"
},
{
"name": "Python",
"bytes": "154337"
},
{
"name": "Shell",
"bytes": "3860"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from mathpy.combinatorics import binomial
from mathpy.distributions import continuous, discrete
from mathpy.linalgebra import lu, matrix, norm, qr
from mathpy.numerical import differentiation, integration, polynomial, roots
from mathpy.numtheory import constants, divisibility, exponentiation, factor, integers, pi, primes, sequences
from mathpy.random import random, sample
from mathpy.settheory import sets
from mathpy.special import factorial, gamma
from mathpy.stats import aov, fa, hypothesis, simulate, summary
| {
"content_hash": "1f8c646a3faca49129ddc28ebc62101b",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 109,
"avg_line_length": 51.63636363636363,
"alnum_prop": 0.8133802816901409,
"repo_name": "aschleg/mathpy",
"id": "64079d02c09672062b1c760bbd55e79af32cfc20",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mathpy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "448416"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class MetricsConfig(AppConfig):
name = 'apps.metrics'
| {
"content_hash": "613c0e40a41308507979c57fd85fef38",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 18.8,
"alnum_prop": 0.7553191489361702,
"repo_name": "klashxx/PyConES2017",
"id": "6c8c2153885da998988623f35b6b449da664edcf",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/sysgate/apps/metrics/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21077"
},
{
"name": "HTML",
"bytes": "19432"
},
{
"name": "Python",
"bytes": "25071"
}
],
"symlink_target": ""
} |
"""Test OAuth functionality."""
#
# (C) Pywikibot team, 2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
import os
from pywikibot.login import OauthLoginManager
from tests.aspects import (
unittest,
require_modules,
TestCase,
DefaultSiteTestCase,
)
@require_modules('mwoauth')
class OAuthSiteTestCase(TestCase):
"""Run tests related to OAuth authentication."""
oauth = True
def _get_oauth_tokens(self):
"""Get valid OAuth tokens from environment variables."""
tokens_env = 'OAUTH_TOKENS_' + self.family.upper()
tokens = os.environ.get(tokens_env + '_' + self.code.upper(), None)
tokens = tokens or os.environ.get(tokens_env, None)
return tuple(tokens.split(':')) if tokens is not None else None
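    # The environment variable is expected to hold four colon-separated values,
    # e.g. (hypothetical) OAUTH_TOKENS_WIKIPEDIA_EN='<consumer key>:<consumer secret>:<access key>:<access secret>';
    # setUp() uses the first two as the consumer token and the last two as the access token.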
def setUp(self):
"""Check if OAuth extension is installed and OAuth tokens are set."""
super(OAuthSiteTestCase, self).setUp()
self.site = self.get_site()
if not self.site.has_extension('OAuth'):
raise unittest.SkipTest('OAuth extension not loaded on test site')
tokens = self._get_oauth_tokens()
if tokens is None:
raise unittest.SkipTest('OAuth tokens not set')
self.assertEqual(len(tokens), 4)
self.consumer_token = tokens[:2]
self.access_token = tokens[2:]
class DefaultOAuthSiteTestCase(DefaultSiteTestCase, OAuthSiteTestCase):
"""Default OAuth site test."""
pass
class TestOauthLoginManger(DefaultOAuthSiteTestCase):
"""Test OAuth login manager."""
def _get_login_manager(self):
login_manager = OauthLoginManager(self.consumer_token[1], False,
self.site, self.consumer_token[0])
# Set access token directly, discard user interaction token fetching
login_manager._access_token = self.access_token
return login_manager
def test_login(self):
"""Test login."""
login_manager = self._get_login_manager()
login_manager.login()
self.assertEqual(login_manager.consumer_token, self.consumer_token)
self.assertEqual(login_manager.access_token, self.access_token)
def test_identity(self):
"""Test identity."""
login_manager = self._get_login_manager()
self.assertIsNotNone(login_manager.access_token)
self.assertIsInstance(login_manager.identity, dict)
self.assertEqual(login_manager.identity['username'],
self.site.username(sysop=False))
if __name__ == '__main__':
try:
unittest.main()
except SystemExit:
pass
| {
"content_hash": "cfed54181df572f24eca662fefb81ac7",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 30.727272727272727,
"alnum_prop": 0.643491124260355,
"repo_name": "icyflame/batman",
"id": "a0bf0e01a410d584fcd548f7173d7e5bfed498f3",
"size": "2729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/oauth_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3922041"
}
],
"symlink_target": ""
} |
"""The purpose of this module is to take a weather data point (such as
air pressure or wind direction) and normalize/rescale it to a scalar
value [0.0, 1.0] and a distribution.
"""
import math
def normalize(data_type, series, scalar, absolute):
"""Normalize nature data.
data_type: string describing the data (such as
"wind-direction")
series: a list with all available data points.
scalar: the data point to normalize.
    absolute: True to scale against fixed absolute bounds for the data type
    instead of the observed range of the series.
"""
# TODO (bjorn): Rename this.
# TODO (bjorn): Return distribution.
# TODO (bjorn): Does it make sense to move the invertion step here?
length = max(series) - min(series)
shifted = scalar - min(series)
norm = shifted / length
if data_type == 'wind-direction':
if absolute:
norm = scalar / 360.0
elif data_type == 'wind-speed': # XXX
if absolute:
norm = scalar / 20.0
elif data_type == 'temperature': # XXX
if absolute:
# -20 to 40
temp = max(0, scalar + 20)
# 0 to 60
norm = temp / 60.0
elif data_type == 'pressure':
# world records are low 870 and high 1085.7
# standard atmosphere = 1013
if absolute:
norm = (scalar - 980) / 1040 # XXX
if norm < 0:
norm = 0
elif norm > 1.0:
norm = 1.0
elif data_type == 'cloud-cover':
if absolute:
norm = scalar / 3.0
elif data_type == 'rain':
if absolute:
norm = min(1.0, scalar / 10.0) # XXX
return norm
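# Quick self-check sketch for normalize(); the sample series is made up, only
# the function above is assumed:
if __name__ == '__main__':
    _series = [0.0, 90.0, 180.0]
    assert normalize('wind-direction', _series, 90.0, True) == 0.25   # 90 / 360
    assert normalize('wind-direction', _series, 90.0, False) == 0.5   # middle of the observed range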
| {
"content_hash": "a9e456c1d5d0e9c3a5c4f6e4a21e2f5d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 71,
"avg_line_length": 30.296296296296298,
"alnum_prop": 0.5580684596577017,
"repo_name": "bjornedstrom/funkcast",
"id": "6f4382f3cd3405b5ed8c0c656bdfcff1b675d775",
"size": "1740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "funkcast/nature.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48514"
}
],
"symlink_target": ""
} |
import pytest
from fixture.application import Application
fixture = None
@pytest.fixture
def app(request):
global fixture
if fixture is None:
fixture = Application()
else:
if not fixture.is_valid():
fixture = Application()
fixture.session.ensure_login(username="admin", password="secret")
return fixture
@pytest.fixture(scope="session", autouse=True)
def stop(request):
def fin():
fixture.session.ensure_logout()
fixture.destroy()
request.addfinalizer(fin)
return fixture | {
"content_hash": "a46af7e9a5036dc69ad7dc9d64674f2a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 69,
"avg_line_length": 22.08,
"alnum_prop": 0.6721014492753623,
"repo_name": "denis-nuzhdin/test_py",
"id": "3cd78d68890708a301fb6b70ad51a1b23021892f",
"size": "552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19665"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_cache.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "3e74946a76a08707b992424cd54f97b8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.70995670995671,
"repo_name": "suselrd/django-social-graph",
"id": "93ae0ec495f1ea43887bfdc5819081e8b82615ce",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_cache/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "118004"
}
],
"symlink_target": ""
} |
from django.db import models
from django_comments.models import Comment
from django_comments.managers import CommentManager
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
PATH_SEPARATOR = getattr(settings, 'COMMENT_PATH_SEPARATOR', '/')
PATH_DIGITS = getattr(settings, 'COMMENT_PATH_DIGITS', 10)
class ThreadedComment(Comment):
title = models.TextField(_('Title'), blank=True)
parent = models.ForeignKey('self', null=True, blank=True, default=None, related_name='children', verbose_name=_('Parent'))
last_child = models.ForeignKey('self', null=True, blank=True, on_delete=models.SET_NULL, verbose_name=_('Last child'))
tree_path = models.TextField(_('Tree path'), editable=False, db_index=True)
comment_html = models.TextField()
objects = CommentManager()
@property
def depth(self):
return len(self.tree_path.split(PATH_SEPARATOR))
@property
def root_id(self):
return int(self.tree_path.split(PATH_SEPARATOR)[0])
@property
def root_path(self):
return ThreadedComment.objects.filter(pk__in=self.tree_path.split(PATH_SEPARATOR)[:-1])
def save(self, *args, **kwargs):
skip_tree_path = kwargs.pop('skip_tree_path', False)
super(ThreadedComment, self).save(*args, **kwargs)
if skip_tree_path:
return None
tree_path = unicode(self.pk).zfill(PATH_DIGITS)
if self.parent:
tree_path = PATH_SEPARATOR.join((self.parent.tree_path, tree_path))
self.parent.last_child = self
ThreadedComment.objects.filter(pk=self.parent_id).update(last_child=self)
self.tree_path = tree_path
ThreadedComment.objects.filter(pk=self.pk).update(tree_path=self.tree_path)
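    # For example, a reply with pk=7 to a root comment with pk=3 ends up with
    # tree_path '0000000003/0000000007' (ids zero-padded to PATH_DIGITS digits and
    # joined with PATH_SEPARATOR), so ordering by tree_path yields the threaded order.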
def delete(self, *args, **kwargs):
# Fix last child on deletion.
if self.parent_id:
prev_child_id = ThreadedComment.objects.filter(parent=self.parent_id).exclude(pk=self.pk).order_by('-submit_date').values_list('pk', flat=True)[0]
ThreadedComment.objects.filter(pk=self.parent_id).update(last_child=prev_child_id)
super(ThreadedComment, self).delete(*args, **kwargs)
class Meta(object):
ordering = ('tree_path',)
db_table = 'threadedcomments_comment'
verbose_name = _('Threaded comment')
verbose_name_plural = _('Threaded comments')
| {
"content_hash": "512da445a3d5ace112faa2dfaac63810",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 158,
"avg_line_length": 41.275862068965516,
"alnum_prop": 0.6716791979949874,
"repo_name": "vlevit/vlevit.org",
"id": "bcbe6e6560f5b19a1d7393cdf4ccc048674fc210",
"size": "2394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "threadedcomments/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "23442"
},
{
"name": "Emacs Lisp",
"bytes": "160"
},
{
"name": "HTML",
"bytes": "66671"
},
{
"name": "JavaScript",
"bytes": "4598"
},
{
"name": "Lua",
"bytes": "10454"
},
{
"name": "Python",
"bytes": "137392"
},
{
"name": "Shell",
"bytes": "11719"
}
],
"symlink_target": ""
} |
"""
defines logger classes and a few convenience methods, not related to the declarations
tree
"""
import os
import sys
import logging
import tempfile
from fs_utils import files_walker
from fs_utils import directories_walker
def _create_logger_( name ):
"""implementation details"""
logger = logging.getLogger(name)
handler = logging.StreamHandler()
#handler.setFormatter( logging.Formatter( os.linesep + '%(levelname)s %(message)s' ) )
handler.setFormatter( logging.Formatter( '%(levelname)s %(message)s' ) )
logger.addHandler(handler)
logger.setLevel(logging.INFO)
return logger
class loggers:
"""class-namespace, defines few loggers classes, used in the project"""
cxx_parser = _create_logger_( 'pygccxml.cxx_parser' )
"""logger for C++ parser functionality
If you set this logger level to DEBUG, you will be able to see the exact
    command line used to invoke GCC-XML and errors that occur during XML parsing
"""
    gccxml = cxx_parser #backward compatibility
pdb_reader = _create_logger_( 'pygccxml.pdb_reader' )
"""logger for MS .pdb file reader functionality
"""
queries_engine = _create_logger_( 'pygccxml.queries_engine' )
"""logger for query engine functionality.
If you set this logger level to DEBUG, you will be able to see what queries
    you run against the declarations tree, measure performance and maybe even improve it.
Query engine reports queries and whether they are optimized or not.
"""
declarations_cache = _create_logger_( 'pygccxml.declarations_cache' )
"""logger for declarations tree cache functionality
    If you set this logger level to DEBUG, you will be able to see what exactly
    happens when you read the declarations from a cache file. You will be able to
    decide whether it is worthwhile for you to use this or that cache strategy.
"""
root = logging.getLogger( 'pygccxml' )
"""root logger exists for your convinience only"""
all = [ root, cxx_parser, queries_engine, declarations_cache, pdb_reader ]
"""contains all logger classes, defined by the class"""
def remove_file_no_raise(file_name ):
"""removes file from disk, if exception is raised, it silently ignores it"""
try:
if os.path.exists(file_name):
os.remove( file_name )
except Exception, error:
loggers.root.error( "Error ocured while removing temprorary created file('%s'): %s"
% ( file_name, str( error ) ) )
def create_temp_file_name(suffix, prefix=None, dir=None):
"""small convinience function that creates temporal file.
This function is a wrapper aroung Python built-in function - tempfile.mkstemp
"""
if not prefix:
prefix = tempfile.template
fd, name = tempfile.mkstemp( suffix=suffix, prefix=prefix, dir=dir )
file_obj = os.fdopen( fd )
file_obj.close()
return name
def normalize_path( some_path ):
"""return os.path.normpath( os.path.normcase( some_path ) )"""
return os.path.normpath( os.path.normcase( some_path ) )
def get_architecture():
"""returns computer architecture: 32 or 64.
The guess is based on maxint.
"""
if sys.maxint == 2147483647:
return 32
elif sys.maxint == 9223372036854775807:
return 64
else:
raise RuntimeError( "Unknown architecture" )
#The following code is cut-and-paste from this post:
#http://groups.google.com/group/comp.lang.python/browse_thread/thread/5b71896c06bd0f76/
#Thanks to Michele Simionato, for it
class cached(property):
'Convert a method into a cached attribute'
def __init__(self, method):
private = '_' + method.__name__
def fget(s):
try:
return getattr(s, private)
except AttributeError:
value = method(s)
setattr(s, private, value)
return value
def fdel(s):
del s.__dict__[private]
super(cached, self).__init__(fget, fdel=fdel)
@staticmethod
def reset(self):
cls = self.__class__
for name in dir(cls):
attr = getattr(cls, name)
if isinstance(attr, cached):
delattr(self, name)
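# Minimal usage sketch for the 'cached' descriptor above; the '_example' class
# and its values are made up for illustration:
class _example(object):
    def __init__(self, x):
        self.x = x
    @cached
    def doubled(self):
        return 2 * self.x
# _example(21).doubled evaluates the method once and stores the result in the
# private attribute '_doubled'; cached.reset(instance) clears such cached values.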
class enum( object ):
"""Usage example:
class fruits(enum):
apple = 0
orange = 1
fruits.has_value( 1 )
fruits.name_of( 1 )
"""
@classmethod
def has_value( cls, enum_numeric_value ):
for name, value in cls.__dict__.iteritems():
if enum_numeric_value == value:
return True
else:
return False
@classmethod
def name_of( cls, enum_numeric_value ):
for name, value in cls.__dict__.iteritems():
if enum_numeric_value == value:
return name
else:
raise RuntimeError( 'Unable to find name for value(%d) in enumeration "%s"'
% ( enum_numeric_value, cls.__name__ ) )
| {
"content_hash": "919326b94ba960c33bce67401b0b9406",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 91,
"avg_line_length": 33.5364238410596,
"alnum_prop": 0.6273696682464455,
"repo_name": "eile/ITK",
"id": "cb51ec1c2581954ccc47b9bccf13f14d5ccc387f",
"size": "5257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Modules/ThirdParty/pygccxml/src/pygccxml/utils/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Bison",
"bytes": "20428"
},
{
"name": "C",
"bytes": "25362189"
},
{
"name": "C#",
"bytes": "1714"
},
{
"name": "C++",
"bytes": "47099474"
},
{
"name": "CSS",
"bytes": "24960"
},
{
"name": "FORTRAN",
"bytes": "2241251"
},
{
"name": "HTML",
"bytes": "208088"
},
{
"name": "Io",
"bytes": "1833"
},
{
"name": "Java",
"bytes": "57739"
},
{
"name": "Makefile",
"bytes": "11691"
},
{
"name": "Objective-C",
"bytes": "72946"
},
{
"name": "Objective-C++",
"bytes": "6591"
},
{
"name": "Perl",
"bytes": "19692"
},
{
"name": "Prolog",
"bytes": "4406"
},
{
"name": "Python",
"bytes": "892773"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "Shell",
"bytes": "118013"
},
{
"name": "Tcl",
"bytes": "74786"
},
{
"name": "XSLT",
"bytes": "194772"
}
],
"symlink_target": ""
} |
import pytest
import hail as hl
from ..helpers import resource
@pytest.fixture(scope='session')
def mt():
return hl.read_matrix_table(resource('sample.vcf-20-partitions.mt'))
@pytest.fixture(scope='session')
def ht(mt):
return mt.rows()
@pytest.fixture(scope='session', params=[hl.locus, hl.Locus])
def probe_locus(request):
return request.param('20', 17434581)
@pytest.fixture(scope='session', params=[hl.locus, hl.Locus])
def probe_variant(request):
return hl.Struct(locus=request.param('20', 17434581), alleles=['A', 'G'])
| {
"content_hash": "dfbe0e52c51ee6581ef40089787fb98c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 77,
"avg_line_length": 22.958333333333332,
"alnum_prop": 0.705989110707804,
"repo_name": "hail-is/hail",
"id": "a57420c1e84c72b28b1f3aba668c70eb77f1dd03",
"size": "551",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "hail/python/test/hail/extract_intervals/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7729"
},
{
"name": "C",
"bytes": "779"
},
{
"name": "C++",
"bytes": "171899"
},
{
"name": "CMake",
"bytes": "3045"
},
{
"name": "CSS",
"bytes": "666"
},
{
"name": "Dockerfile",
"bytes": "10056"
},
{
"name": "Emacs Lisp",
"bytes": "377"
},
{
"name": "HCL",
"bytes": "54923"
},
{
"name": "HTML",
"bytes": "155946"
},
{
"name": "Java",
"bytes": "38401"
},
{
"name": "JavaScript",
"bytes": "877"
},
{
"name": "Jupyter Notebook",
"bytes": "305748"
},
{
"name": "MLIR",
"bytes": "20"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Python",
"bytes": "5635857"
},
{
"name": "R",
"bytes": "3038"
},
{
"name": "SCSS",
"bytes": "33487"
},
{
"name": "Scala",
"bytes": "5050997"
},
{
"name": "Shell",
"bytes": "75539"
},
{
"name": "XSLT",
"bytes": "5748"
}
],
"symlink_target": ""
} |
import openmoves
from commands import AddUser
from model import db, User, Move, MoveEdit
from flask import json
import pytest
import html5lib
import re
import os
from datetime import timedelta, datetime
from gpx_import import GPX_IMPORT_OPTION_PAUSE_DETECTION, GPX_IMPORT_OPTION_PAUSE_DETECTION_THRESHOLD, GPX_DEVICE_NAME, \
GPX_ACTIVITY_TYPE, GPX_DEVICE_SERIAL, GPX_SAMPLE_TYPE, GPX_TRK, GPX_IMPORT_PAUSE_TYPE_PAUSE_DETECTION
app = None
class TestOpenMoves(object):
@classmethod
def setup_class(cls):
global app
app = openmoves.init(configfile=None)
db_uri = 'sqlite:///:memory:'
app.config.update(SQLALCHEMY_ECHO=False, WTF_CSRF_ENABLED=False, DEBUG=True, TESTING=True, SQLALCHEMY_DATABASE_URI=db_uri, SECRET_KEY="testing")
def setup_method(self, method):
self.app = app
self.client = app.test_client()
def _login(self, username='test_user', password='test password'):
data = {'username': username, 'password': password, 'timezone': 'Europe/Berlin'}
return self.client.post('/login', data=data, follow_redirects=True)
def _validate_response(self, response, tmpdir=None, code=200, check_content=True):
if tmpdir:
tmpdir.join("response.html").write(response.data, mode='wb')
assert response.status_code == code, "HTTP status: %s" % response.status
if response.data:
response_data = response.data.decode('utf-8')
if check_content:
if response.mimetype == 'text/html':
self._validate_html5(response_data)
elif response.mimetype == 'application/json':
return json.loads(response_data)
else:
raise ValueError("illegal mimetype: '%s'" % response.mimetype)
return response_data
def _validate_html5(self, response_data):
parser = html5lib.HTMLParser(strict=True)
parser.parse(response_data)
def _assert_requires_login(self, url, method='GET'):
expected_url = 'login?next=%s' % url.replace('/', '%2F')
return self._assert_redirects(url, expected_url, code=302, method=method)
def _assert_redirects(self, url, location, code=301, method='GET', **requestargs):
if method == 'GET':
response = self.client.get(url, **requestargs)
elif method == 'POST':
response = self.client.post(url, **requestargs)
else:
raise ValueError("illegal method: %s" % method)
assert response.status_code == code
if location.startswith("/"):
location = location[1:]
assert response.headers["Location"] == "http://localhost/%s" % location
def test_initialize_config(self, tmpdir):
tmpfile = tmpdir.join("openmoves.cfg")
openmoves.initialize_config(tmpfile)
lines = tmpfile.readlines()
assert len(lines) == 1
assert re.match(r"SECRET_KEY = '[a-f0-9]{64}'", lines[0]), "unexpected line: %s" % lines[0]
def test_initialize_config_subsequent_calls_differ(self, tmpdir):
tmpfile1 = tmpdir.join("openmoves1.cfg")
tmpfile2 = tmpdir.join("openmoves2.cfg")
openmoves.initialize_config(tmpfile1)
openmoves.initialize_config(tmpfile2)
assert tmpfile1.read() != tmpfile2.read()
def test_create_schema(self):
with app.test_request_context():
db.create_all()
def test_add_user(self):
with app.test_request_context():
assert User.query.count() == 0
cmd = AddUser(lambda: app.app_context(), app_bcrypt=openmoves.app_bcrypt)
cmd.run(username='test_user')
with app.test_request_context():
assert User.query.count() == 1
assert User.query.filter_by(username='test_user').one()
with pytest.raises(AssertionError) as e:
cmd.run(username='test_user')
assert u"user already exists" in str(e.value)
cmd.run(username='test_user2')
with app.test_request_context():
assert User.query.count() == 2
assert User.query.filter_by(username='test_user').one() != User.query.filter_by(username='test_user2').one()
def test_index(self, tmpdir):
response = self.client.get('/')
        response_data = self._validate_response(response, tmpdir)
assert u"An open source alternative" in response_data
assert u"0 moves already analyzed" in response_data
def test_login_get(self, tmpdir):
response = self.client.get('/login')
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Login</title>" in response_data
assert u"Please sign in" in response_data
def test_login_invalid(self, tmpdir):
data = {'username': 'user which does not exist', 'password': 'test password'}
response = self.client.post('/login', data=data)
response_data = self._validate_response(response, tmpdir)
assert u"no such user" in response_data
assert u"Please sign in" in response_data
def test_login_valid(self, tmpdir):
username = 'test_user'
password = 'test password'
with app.test_request_context():
User.query.delete(synchronize_session=False)
user = User(username=username, active=True)
user.password = openmoves.app_bcrypt.generate_password_hash(password, 10)
db.session.add(user)
db.session.commit()
response = self._login()
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Dashboard</title>" in response_data
def test_login_logout_other_user(self, tmpdir):
username = 'other_user'
password = u'Paßswörd→✓≈'
with app.test_request_context():
user = User(username=username, active=True)
user.password = openmoves.app_bcrypt.generate_password_hash(password, 10)
db.session.add(user)
db.session.commit()
response = self._login(username=username, password=password)
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Dashboard</title>' in response_data
assert username in response_data
response = self.client.get('/logout', follow_redirects=True)
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves</title>' in response_data
assert username not in response_data
def test_custom_404(self, tmpdir):
response = self.client.get('/page-which-does-not-exist')
response_data = self._validate_response(response, tmpdir, code=404, check_content=True)
assert u"<title>OpenMoves – Not found</title>" in response_data
def test_moves_not_logged_in(self, tmpdir):
self._assert_requires_login('/moves')
def test_moves_empty(self, tmpdir):
self._login()
response = self.client.get('/moves')
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Moves</title>" in response_data
assert u'No moves in selected date range' in response_data
def test_move_not_logged_in(self, tmpdir):
self._assert_requires_login('/moves/1')
def test_move_not_found(self, tmpdir):
self._login()
response = self.client.get('/moves/1')
self._validate_response(response, code=404, check_content=False)
def test_delete_move_not_logged_in(self, tmpdir):
self._assert_requires_login('/moves/1/delete')
def test_delete_move_not_found(self, tmpdir):
self._login()
response = self.client.get('/moves/1/delete')
self._validate_response(response, code=404, check_content=False)
def test_dashboard_not_logged_in(self, tmpdir):
self._assert_requires_login('/dashboard')
def test_dashboard_empty(self, tmpdir):
self._login()
response = self.client.get('/dashboard')
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Dashboard</title>' in response_data
def test_export_move_not_found(self, tmpdir):
self._login()
response = self.client.get('/moves/1/export')
self._validate_response(response, code=404, check_content=False)
def test_export_move_not_logged_in(self, tmpdir):
self._assert_requires_login('/moves/1/export')
def test_import_move(self, tmpdir):
self._login()
response = self.client.get('/import')
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Import</title>' in response_data
assert 'Please find' in response_data
assert '%AppData%/Suunto/Moveslink2' in response_data
def test_import_move_upload_single(self, tmpdir):
self._login()
data = {}
filename = 'CAFEBABECAFEBABE-2014-11-09T14_55_13-0.sml.gz'
dn = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dn, filename), 'rb') as f:
data['files'] = [(f, filename)]
response = self.client.post('/import', data=data, follow_redirects=True)
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Move 1</title>' in response_data
assert u"imported '%s': move 1" % filename in response_data
assert u'>Pool swimming</' in response_data
assert u'<td>2014-11-09 14:55:13</td>' in response_data
assert u'<td>02:07.80 min / 100 m</td>' in response_data
assert u'<td>795</td>' in response_data # strokes
# first pause
assert u'<span class="date-time">2014-11-09 15:15:49.991</span>' in response_data
assert u'<span class="date-time">2014-11-09 15:26:45.314</span>' in response_data
assert u'<td>00:10:55.32</td>' in response_data
with app.test_request_context():
move = Move.query.one()
assert move.recovery_time is None
def test_import_move_upload_multiple(self, tmpdir):
self._login()
data = {}
dn = os.path.dirname(os.path.realpath(__file__))
filename1 = 'CAFEBABECAFEBABE-2014-12-31T12_00_32-0.sml.gz'
filename2 = 'log-CAFEBABECAFEBABE-2014-07-23T18_56_14-5.xml.gz'
with open(os.path.join(dn, filename1), 'rb') as file1:
with open(os.path.join(dn, filename2), 'rb') as file2:
data['files'] = [(file1, filename1), (file2, filename2)]
response = self.client.post('/import', data=data, follow_redirects=True)
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Moves</title>' in response_data
assert u'imported 2 moves' in response_data
with app.test_request_context():
move = Move.query.filter(Move.activity == 'Trekking').one()
assert move.ascent_time == timedelta(seconds=1181)
move = Move.query.filter(Move.activity == 'Cycling').one()
assert move.distance == 21277
def test_import_move_upload_gpx(self, tmpdir):
self._login()
data = {}
filename = 'baerensee_testtrack.gpx'
# filename = 'Move_2013_07_21_15_26_53_Trail+running.gpx.gz'
dn = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dn, filename), 'rb') as f:
data['files'] = [(f, filename)]
response = self.client.post('/import', data=data, follow_redirects=True)
with app.test_request_context():
count = Move.query.count()
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Move %d</title>" % count in response_data
assert u'imported' in response_data
assert filename in response_data
with app.test_request_context():
move = Move.query.filter(Move.activity == GPX_ACTIVITY_TYPE).one()
assert move.device.name == GPX_DEVICE_NAME
assert move.device.serial_number == GPX_DEVICE_SERIAL
assert move.activity == GPX_ACTIVITY_TYPE
assert move.date_time == datetime(2015, 1, 1, 10, 0, 0, 0)
assert move.duration == timedelta(minutes=18)
assert int(move.distance) == 1800
assert move.log_item_count == 8 + 2 # 2 entries for the pause events
assert move.log_item_count == move.samples.count()
# Altitudes
assert move.altitude_max == move.samples[4].altitude
assert move.altitude_max == move.samples[4].gps_altitude
assert move.altitude_min == move.samples[0].altitude
assert move.altitude_min == move.samples[7].altitude
assert move.ascent == 600
assert move.descent == 1200
assert move.ascent_time == timedelta(minutes=6) - timedelta(microseconds=1)
assert move.descent_time == timedelta(minutes=12) - timedelta(microseconds=1)
# Speed
assert round(move.speed_avg, 1) == round(6 / 3.6, 1)
assert round(move.speed_max, 1) == round(30 / 3.6, 1)
assert move.speed_max == move.samples[5].speed
# Pause events
events = [sample for sample in move.samples if sample.events]
assert len(events) == 2
start_pause_sample = events[0].events['pause']
assert start_pause_sample['state'].lower() == 'true'
assert start_pause_sample['duration'] == str(timedelta(minutes=54))
assert int(float(start_pause_sample['distance'])) == 142
assert start_pause_sample['type'] == GPX_TRK
end_pause_sample = events[1].events['pause']
assert end_pause_sample['state'].lower() == 'false'
assert end_pause_sample['duration'] == str(0)
assert int(float(end_pause_sample['distance'])) == 0
assert end_pause_sample['type'] == GPX_TRK
# Temperatures
assert round(move.temperature_min - 273.15, 1) == 17.0
assert round(move.temperature_max - 273.15, 1) == 29.0
# HR
assert round(move.hr_max * 60.0, 1) == 210.0
assert round(move.hr_min * 60.0, 1) == 50.0
assert round(move.hr_avg * 60.0, 1) == 127.5
previous_sample = None
for sample in move.samples:
if sample.sample_type != GPX_SAMPLE_TYPE:
continue
assert sample.time >= timedelta(seconds=0)
assert sample.utc == move.date_time + sample.time
assert sample.distance >= 0
assert sample.sea_level_pressure >= 100
assert sample.sea_level_pressure <= 1035
assert sample.temperature >= move.temperature_min
assert sample.temperature <= move.temperature_max
assert sample.energy_consumption >= 1.0
assert sample.energy_consumption <= 2.0
assert sample.speed >= 0.0
assert round(sample.speed, 1) <= 30.0 / 3.6
assert sample.vertical_speed >= 0.1
assert sample.vertical_speed <= 0.6
if previous_sample:
assert sample.time > previous_sample.time
assert sample.utc > previous_sample.utc
assert sample.distance >= previous_sample.distance
previous_sample = sample
# Finally delete the GPX import for next GPX import test with pause detection
response_delete = self.client.get('/moves/%d/delete' % count, follow_redirects=True)
response_delete_data = self._validate_response(response_delete, tmpdir)
assert u"move %d deleted" % count in response_delete_data
def test_import_move_upload_gpx_with_pause_detection(self, tmpdir):
"""GPX import with pause detection option"""
self._login()
data = {}
filename = 'baerensee_testtrack.gpx'
# filename = 'Move_2013_07_21_15_26_53_Trail+running.gpx.gz'
dn = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dn, filename), 'rb') as f:
data['files'] = [(f, filename)]
data[GPX_IMPORT_OPTION_PAUSE_DETECTION] = 'on'
data[GPX_IMPORT_OPTION_PAUSE_DETECTION_THRESHOLD] = '479'
response = self.client.post('/import', data=data, follow_redirects=True)
with app.test_request_context():
count = Move.query.count()
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Move %d</title>" % count in response_data
assert u'imported' in response_data
assert filename in response_data
with app.test_request_context():
move = Move.query.filter(Move.activity == GPX_ACTIVITY_TYPE).one()
assert move.device.name == GPX_DEVICE_NAME
assert move.device.serial_number == GPX_DEVICE_SERIAL
assert move.date_time == datetime(2015, 1, 1, 10, 0, 0, 0)
assert move.duration == timedelta(minutes=18 - 8) # 8min by pause detection
assert int(move.distance) == 1800 - 400 # 400m by pause detection
assert move.log_item_count == 8 + 4 # 4 entries for the pause events
assert move.log_item_count == move.samples.count()
# Attention: samples are not sorted by UTC
assert move.altitude_max == move.samples[4].altitude
assert move.altitude_max == move.samples[4].gps_altitude
assert move.altitude_min == move.samples[0].altitude
assert move.altitude_min == move.samples[9].altitude
assert move.ascent == 600
assert move.descent == 1200 - 400 # 400m by pause_detection
assert move.ascent_time == timedelta(minutes=6) - timedelta(microseconds=1)
assert move.descent_time == timedelta(minutes=(12 - 8)) - timedelta(microseconds=1 + 2) # 8min by pause detection
# Speed
assert round(move.speed_avg, 1) == round(8.4 / 3.6, 1)
assert round(move.speed_max, 1) == round(30 / 3.6, 1)
assert move.speed_max == move.samples[5].speed
# Pause events
events = [sample for sample in move.samples if sample.events]
assert len(events) == 2 + 2 # 2 pauses by pause_detection
start_pause_sample = events[2].events['pause']
assert start_pause_sample['state'].lower() == 'true'
assert start_pause_sample['duration'] == str(timedelta(minutes=54))
assert int(float(start_pause_sample['distance'])) == 142
assert start_pause_sample['type'] == GPX_TRK
end_pause_sample = events[3].events['pause']
assert end_pause_sample['state'].lower() == 'false'
assert end_pause_sample['duration'] == str(0)
assert int(float(end_pause_sample['distance'])) == 0
assert end_pause_sample['type'] == GPX_TRK
start_pause_sample = events[0].events['pause']
assert start_pause_sample['state'].lower() == 'true'
assert start_pause_sample['duration'] == str(timedelta(minutes=8)) # 8min by pause detection
assert int(float(start_pause_sample['distance'])) == 400
assert start_pause_sample['type'] == GPX_IMPORT_PAUSE_TYPE_PAUSE_DETECTION
end_pause_sample = events[1].events['pause']
assert end_pause_sample['state'].lower() == 'false'
assert end_pause_sample['duration'] == str(0)
assert int(float(end_pause_sample['distance'])) == 0
assert end_pause_sample['type'] == GPX_IMPORT_PAUSE_TYPE_PAUSE_DETECTION
def test_import_move_already_exists(self, tmpdir):
self._login()
data = {}
with app.test_request_context():
count_before = Move.query.count()
filename = 'baerensee_testtrack.gpx'
dn = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dn, filename), 'rb') as f:
data['files'] = [(f, filename)]
response = self.client.post('/import', data=data, follow_redirects=True)
with app.test_request_context():
count_after = Move.query.count()
assert count_after == count_before
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Import</title>' in response_data
assert u'already exists' in response_data
def test_moves(self, tmpdir):
self._login()
response = self.client.get('/moves?start_date=2014-01-01&end_date=2014-12-31')
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Moves</title>' in response_data
assert u'<td><a href="/moves/1">2014-11-09 14:55:13</a></td>' in response_data
assert u'<td><a href="/moves/2">2014-12-31 12:00:32</a></td>' in response_data
assert u'<td><a href="/moves/3">2014-07-23 18:56:14</a></td>' in response_data
assert u'All moves <span class="badge">3</span>' in response_data
assert u'Cycling <span class="badge">1</span>' in response_data
assert u'Trekking <span class="badge">1</span>' in response_data
assert u'Pool swimming <span class="badge">1</span>' in response_data
assert u'>Pool swimming</' in response_data
assert u'<td>00:31:25.00</td>' in response_data
assert u'<td>1475 m</td>' in response_data
assert u'<td><span>2.8 km/h</span></td>' in response_data
assert u'<td><span>27.4°C</span></td>' in response_data
assert u'<td>795</td>' in response_data
def test_moves_with_date_range(self, tmpdir):
self._login()
response = self.client.get('/moves?start_date=2014-11-09&end_date=2014-11-09')
response_data = self._validate_response(response, tmpdir)
assert u'<td><a href="/moves/1">2014-11-09 14:55:13</a></td>' in response_data
assert u'All moves <span class="badge">1</span>' in response_data
assert u'Pool swimming <span class="badge">1</span>' in response_data
assert u'Cycling' not in response_data
assert u'Trekking' not in response_data
response = self.client.get('/moves?start_date=2014-07-01&end_date=2014-12-01')
response_data = self._validate_response(response, tmpdir)
assert u'<td><a href="/moves/1">2014-11-09 14:55:13</a></td>' in response_data
assert u'<td><a href="/moves/3">2014-07-23 18:56:14</a></td>' in response_data
assert u'All moves <span class="badge">2</span>' in response_data
assert u'Cycling <span class="badge">1</span>' in response_data
assert u'Pool swimming <span class="badge">1</span>' in response_data
assert u'Trekking' not in response_data
def test_move_pages(self, tmpdir):
self._login()
with app.test_request_context():
for move in Move.query:
response = self.client.get("/moves/%d" % move.id)
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Move %d</title>" % move.id in response_data
assert u">%s</" % move.activity in response_data
def test_csv_export_filename(self, tmpdir):
self._login()
response = self.client.get('/moves/1/export?format=csv')
assert response.headers['Content-Disposition'] == 'attachment; filename=Move_2014-11-09T14_55_13_Pool+swimming.csv'
response = self.client.get('/moves/2/export?format=csv')
assert response.headers['Content-Disposition'] == 'attachment; filename=Move_2014-12-31T12_00_32_DE_Stegen_Trekking.csv'
def test_csv_exports(self, tmpdir):
self._login()
with app.test_request_context():
for move in Move.query:
response = self.client.get("/moves/%d/export?format=csv" % move.id)
response_data = self._validate_response(response, tmpdir, check_content=False)
lines = response_data.split('\r\n')
header = lines[0]
assert 'Timestamp;Duration;Latitude' in header
assert len(lines) == move.samples.count() + 1
def test_gpx_export(self, tmpdir):
self._login()
response = self.client.get('/moves/3/export?format=gpx')
response_data = self._validate_response(response, tmpdir, check_content=False)
assert response.headers['Content-Disposition'] == 'attachment; filename=Move_2014-07-23T18_56_14_DE_Rheinbach_Cycling.gpx'
assert u'<gpx ' in response_data
assert u'lat="50.632' in response_data
assert u'lon="6.952' in response_data
def test_gpx_export_umlaut_in_filename(self, tmpdir):
with app.test_request_context():
move = Move.query.filter(Move.id == 3).one()
move.location_raw = {'address': {'city_district': u'Galtür', 'country_code': 'at'}}
db.session.commit()
self._login()
response = self.client.get('/moves/3/export?format=gpx')
assert response.headers['Content-Disposition'] == u'attachment; filename=Move_2014-07-23T18_56_14_AT_Galt%C3%BCr_Cycling.gpx'
def test_move_with_heart_rate(self, tmpdir):
self._login()
data = {}
filename = 'CAFEBABECAFEBABE-2014-11-02T13_08_09-0.sml.gz'
dn = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dn, filename), 'rb') as f:
data['files'] = [(f, filename)]
response = self.client.post('/import', data=data, follow_redirects=True)
with app.test_request_context():
count = Move.query.count()
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Move %d</title>" % count in response_data
assert u'>Kayaking</' in response_data
assert u'<th>Avg. Heart Rate</th>' in response_data
assert u'<td><span>97 bpm</span></td>' in response_data
response = self.client.get('/moves?start_date=2014-01-01&end_date=2020-01-01')
response_data = self._validate_response(response, tmpdir)
assert re.search(u'<th><a href="/moves.+?">Heart Rate</a></th>', response_data)
assert u'<td><span>97 bpm</span></td>' in response_data
def test_import_move_with_relative_performance_level(self, tmpdir):
self._login()
data = {}
filename = 'BABECAFEBABECAFE-2015-06-25T18_45_58-0.sml.gz'
dn = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dn, filename), 'rb') as f:
data['files'] = [(f, filename)]
response = self.client.post('/import', data=data, follow_redirects=True)
with app.test_request_context():
count = Move.query.count()
response_data = self._validate_response(response, tmpdir)
assert u"<title>OpenMoves – Move %d</title>" % count in response_data
assert u'>Running</' in response_data
assert filename in response_data
def test_activity_types_not_logged_in(self, tmpdir):
self._assert_requires_login('/activity_types')
def test_activity_types(self, tmpdir):
self._login()
response = self.client.get('/activity_types')
response_data = self._validate_response(response, tmpdir)
expected_activities = ('Cycling', 'Kayaking', 'Pool swimming', 'Running', 'Trekking', 'Unknown activity')
expected_data = [{"text": activity, "value": activity} for activity in expected_activities]
assert response_data == expected_data
def test_edit_move_not_logged_in(self, tmpdir):
self._assert_requires_login("/moves/1", method='POST')
def test_edit_move_illegal_name(self, tmpdir):
self._login()
data = {'name': 'some illegal name', 'pk': 1}
with pytest.raises(ValueError) as e:
self.client.post('/moves/1', data=data)
assert u"illegal name" in str(e.value)
def test_dashboard_all_moves(self, tmpdir):
self._login()
response = self.client.get('/dashboard?start_date=2014-01-01&end_date=2014-12-31')
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Dashboard</title>' in response_data
assert u'>#Moves<' in response_data
assert u'>4<' in response_data
assert u'<th>Total Distance</th><td>33.55 km</td>' in response_data
assert u'<th>Total Duration</th><td>03:47:09.60</td>' in response_data
assert u'<th>Total Average</th>' in response_data
assert u'<th>Total Ascent</th><td>110 m</td>' in response_data
assert u'<th>Total Descent</th><td>218 m</td>' in response_data
def test_dashboard_with_date_range(self, tmpdir):
self._login()
response = self.client.get('/dashboard?start_date=2014-08-01&end_date=2014-12-01')
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Dashboard</title>' in response_data
assert u'>#Moves<' in response_data
assert u'>2<' in response_data
assert u'<th>Total Distance</th><td>10.05 km</td>' in response_data
assert u'<th>Total Duration</th><td>02:20:23.30</td>' in response_data
def test_edit_move_different_user(self, tmpdir):
username = 'some different user'
password = 'some other password'
with app.test_request_context():
user = User(username=username, active=True)
user.password = openmoves.app_bcrypt.generate_password_hash(password, 10)
db.session.add(user)
db.session.commit()
self._login(username=username, password=password)
data = {'name': 'activity', 'pk': 1}
response = self.client.post('/moves/1', data=data)
assert response.status_code == 404
def test_edit_move_activity_illegal_value(self, tmpdir):
self._login()
data = {'name': 'activity', 'pk': 1, 'value': 'illegal activity'}
with pytest.raises(ValueError) as e:
self.client.post('/moves/1', data=data)
assert u"illegal activity" in str(e.value)
def test_edit_move_activity_success(self, tmpdir):
self._login()
data = {'name': 'activity', 'pk': 1, 'value': 'Trekking'}
response = self.client.post('/moves/1', data=data)
response_data = self._validate_response(response, check_content=False)
assert response_data == 'OK'
with app.test_request_context():
move_edit = MoveEdit.query.one()
assert move_edit.date_time
assert move_edit.move_id == 1
assert move_edit.old_value == {'activity': 'Pool swimming', 'activity_type': 6}
assert move_edit.new_value == {'activity': 'Trekking', 'activity_type': 11}
def test_delete_moves_batch(self, tmpdir):
self._login()
with app.test_request_context():
total_moves_before = Move.query.count()
assert total_moves_before > 2
ids = [str(move.id) for move in Move.query[:2]]
self.client.get("/moves/%s/delete" % ','.join(ids), follow_redirects=False)
response = self.client.get('/moves?start_date=2013-01-01&end_date=2020-01-01')
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Moves</title>' in response_data
total_moves = Move.query.count()
assert total_moves == total_moves_before - len(ids)
assert u"All moves <span class=\"badge\">%d</span>" % total_moves in response_data
def test_delete_moves(self, tmpdir):
self._login()
with app.test_request_context():
total_moves_before = Move.query.count()
assert total_moves_before > 0
for idx, move in enumerate(Move.query):
self.client.get("/moves/%d/delete" % move.id, follow_redirects=False)
response = self.client.get('/moves?start_date=2013-01-01&end_date=2020-01-01')
response_data = self._validate_response(response, tmpdir)
assert u'<title>OpenMoves – Moves</title>' in response_data
total_moves = Move.query.count()
assert total_moves == total_moves_before - (idx + 1)
if total_moves > 0:
assert u"All moves <span class=\"badge\">%d</span>" % total_moves in response_data
else:
assert u'No moves in selected date range.' in response_data
total_moves = Move.query.count()
assert total_moves == 0
| {
"content_hash": "39c97e0ec9b9a12113aee5b3c9b21c40",
"timestamp": "",
"source": "github",
"line_count": 726,
"max_line_length": 152,
"avg_line_length": 45.24242424242424,
"alnum_prop": 0.6119466601717104,
"repo_name": "mourningsun75/openmoves",
"id": "e87ad725c5db3a3c343598b39dc0c504313c3879",
"size": "32929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_openmoves.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10389"
},
{
"name": "HTML",
"bytes": "54891"
},
{
"name": "JavaScript",
"bytes": "89361"
},
{
"name": "Mako",
"bytes": "371"
},
{
"name": "Python",
"bytes": "130642"
}
],
"symlink_target": ""
} |
"""
Scripts to manage categories.
Syntax: python category.py action [-option]
where action can be one of these:
* add - mass-add a category to a list of pages
* remove - remove category tag from all pages in a category
* move - move all pages in a category to another category
* tidy - tidy up a category by moving its articles into subcategories
* tree - show a tree of subcategories of a given category
* listify - make a list of all of the articles that are in a category
and option can be one of these:
Options for "add" action:
* -person - sort persons by their last name
* -create - If a page doesn't exist, do not skip it, create it instead
If action is "add", the following options are supported:
¶ms;
Options for "listify" action:
* -overwrite - This overwrites the current page with the list even if
something is already there.
* -showimages - This displays images rather than linking them in the list.
* -talkpages - This outputs the links to talk pages of the pages to be
listified in addition to the pages themselves.
Options for "remove" action:
* -nodelsum - This specifies not to use the custom edit summary as the
deletion reason. Instead, it uses the default deletion reason
for the language, which is "Category was disbanded" in
English.
Options for several actions:
* -rebuild - reset the database
* -from: - The category to move from (for the move option)
Also, the category to remove from in the remove option
Also, the category to make a list of in the listify option
* -to: - The category to move to (for the move option)
- Also, the name of the list to make in the listify option
NOTE: If the category names have spaces in them you may need to use
a special syntax in your shell so that the names aren't treated as
separate parameters. For instance, in BASH, use single quotes,
e.g. -from:'Polar bears'
* -batch - Don't prompt to delete emptied categories (do it
automatically).
* -summary: - Pick a custom edit summary for the bot.
* -inplace - Use this flag to change categories in place rather than
rearranging them.
* -recurse - Recurse through all subcategories of categories.
* -pagesonly - While removing pages from a category, keep the subpage links
and do not remove them
* -match - Only work on pages whose titles match the given regex (for
move and remove actions).
For the actions tidy and tree, the bot will store the category structure
locally in category.dump. This saves time and server load, but if it uses
these data later, they may be outdated; use the -rebuild parameter in this
case.
For example, to create a new category from a list of persons, type:
python category.py add -person
and follow the on-screen instructions.
Or to do it all from the command-line, use the following syntax:
python category.py move -from:US -to:'United States'
This will move all pages in the category US to the category United States.
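Likewise, to turn the contents of a category into a list page (an illustrative
example using only the options documented above; adjust the names as needed):
python category.py listify -from:'Polar bears' -to:'List of polar bear articles' -overwrite
This will overwrite the page "List of polar bear articles" with links to all
articles currently in the category "Polar bears".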
"""
#
# (C) Rob W.W. Hooft, 2004
# (C) Daniel Herding, 2004
# (C) Wikipedian, 2004-2008
# (C) leogregianin, 2004-2008
# (C) Cyde, 2006-2010
# (C) Anreas J Schwab, 2007
# (C) xqt, 2009-2012
# (C) Pywikipedia team, 2008-2012
#
__version__ = '$Id$'
#
# Distributed under the terms of the MIT license.
#
import os, re, pickle, bz2
import pywikibot
from pywikibot import catlib, config, pagegenerators
from pywikibot import i18n
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
cfd_templates = {
'wikipedia' : {
'en':[u'cfd', u'cfr', u'cfru', u'cfr-speedy', u'cfm', u'cfdu'],
'fi':[u'roskaa', u'poistettava', u'korjattava/nimi', u'yhdistettäväLuokka'],
'he':[u'הצבעת מחיקה', u'למחוק'],
'nl':[u'categorieweg', u'catweg', u'wegcat', u'weg2']
},
'commons' : {
'commons':[u'cfd', u'move']
}
}
class CategoryDatabase:
'''A temporary knowledge base that saves, for each category, its contained
subcategories and articles, so that category pages do not need to be loaded
from the server over and over again.
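A minimal usage sketch (illustrative only; 'some_cat' stands for a
catlib.Category object obtained elsewhere):
catDB = CategoryDatabase()        # reads category.dump.bz2 if it exists
subcats = catDB.getSubcats(some_cat)
articles = catDB.getArticles(some_cat)
catDB.dump()                      # persist the cache for the next run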
'''
def __init__(self, rebuild = False, filename = 'category.dump.bz2'):
if rebuild:
self.rebuild()
else:
try:
if not os.path.isabs(filename):
filename = config.datafilepath(filename)
f = bz2.BZ2File(filename, 'r')
pywikibot.output(u'Reading dump from %s'
% config.shortpath(filename))
databases = pickle.load(f)
f.close()
# keys are categories, values are 2-tuples with lists as entries.
self.catContentDB = databases['catContentDB']
# like the above, but for supercategories
self.superclassDB = databases['superclassDB']
del databases
except:
# If something goes wrong, just rebuild the database
self.rebuild()
def rebuild(self):
self.catContentDB={}
self.superclassDB={}
def getSubcats(self, supercat):
'''For a given supercategory, return the set of Category objects for all
its subcategories. Saves this set in a temporary database so that it
won't be loaded from the server next time it's required.
'''
# if we already know which subcategories exist here
if supercat in self.catContentDB:
return self.catContentDB[supercat][0]
else:
subcatset = set(supercat.subcategories())
articleset = set(supercat.articles())
# add to dictionary
self.catContentDB[supercat] = (subcatset, articleset)
return subcatset
def getArticles(self, cat):
'''For a given category, return the set of Page objects for all its articles.
Saves this set in a temporary database so that it won't be loaded from the
server next time it's required.
'''
# if we already know which articles exist here
if cat in self.catContentDB:
return self.catContentDB[cat][1]
else:
subcatset = set(cat.subcategories())
articleset = set(cat.articles())
# add to dictionary
self.catContentDB[cat] = (subcatset, articleset)
return articleset
def getSupercats(self, subcat):
# if we already know which subcategories exist here
if subcat in self.superclassDB:
return self.superclassDB[subcat]
else:
supercatset = set(subcat.categories())
# add to dictionary
self.superclassDB[subcat] = supercatset
return supercatset
def dump(self, filename = 'category.dump.bz2'):
'''Saves the contents of the dictionaries superclassDB and catContentDB
to disk.
'''
if not os.path.isabs(filename):
filename = config.datafilepath(filename)
if self.catContentDB or self.superclassDB:
pywikibot.output(u'Dumping to %s, please wait...'
% config.shortpath(filename))
f = bz2.BZ2File(filename, 'w')
databases = {
'catContentDB': self.catContentDB,
'superclassDB': self.superclassDB
}
# store dump to disk in binary format
try:
pickle.dump(databases, f, protocol=pickle.HIGHEST_PROTOCOL)
except pickle.PicklingError:
pass
f.close()
else:
try:
os.remove(filename)
except EnvironmentError:
pass
else:
pywikibot.output(u'Database is empty. %s removed'
% config.shortpath(filename))
class AddCategory:
'''A robot to mass-add a category to a list of pages.'''
def __init__(self, generator, sort_by_last_name=False, create=False,
editSummary='', dry=False):
self.generator = generator
self.sort = sort_by_last_name
self.create = create
self.site = pywikibot.getSite()
self.always = False
self.dry = dry
self.newcatTitle = None
self.editSummary = editSummary
def sorted_by_last_name(self, catlink, pagelink):
'''Return a Category with key that sorts persons by their last name.
Parameters: catlink - The Category to be linked
pagelink - the Page to be placed in the category
Trailing words in brackets will be removed. Example: If
category_name is 'Author' and pl is a Page to [[Alexandre Dumas
(senior)]], this function will return this Category:
[[Category:Author|Dumas, Alexandre]]
'''
page_name = pagelink.title()
site = pagelink.site
# regular expression that matches a name followed by a space and
# disambiguation brackets. Group 1 is the name without the rest.
bracketsR = re.compile('(.*) \(.+?\)')
match_object = bracketsR.match(page_name)
if match_object:
page_name = match_object.group(1)
split_string = page_name.split(' ')
if len(split_string) > 1:
# pull last part of the name to the beginning, and append the
# rest after a comma; e.g., "John von Neumann" becomes
# "Neumann, John von"
sorted_key = split_string[-1] + ', ' + \
' '.join(split_string[:-1])
# give explicit sort key
return pywikibot.Page(site, catlink.title() + '|' + sorted_key)
else:
return pywikibot.Page(site, catlink.title())
def run(self):
self.newcatTitle = pywikibot.input(
u'Category to add (do not give namespace):')
if not self.site.nocapitalize:
self.newcatTitle = self.newcatTitle[:1].upper() + \
self.newcatTitle[1:]
if not self.editSummary:
self.editSummary = i18n.twtranslate(self.site, 'category-adding',
{'newcat': self.newcatTitle})
counter = 0
for page in self.generator:
self.treat(page)
counter += 1
pywikibot.output(u"%d page(s) processed." % counter)
def load(self, page):
"""
Loads the given page, does some changes, and saves it.
"""
try:
# Load the page
text = page.get()
except pywikibot.NoPage:
if self.create:
pywikibot.output(u"Page %s doesn't exist yet; creating."
% (page.title(asLink=True)))
text = ''
else:
pywikibot.output(u"Page %s does not exist; skipping."
% page.title(asLink=True))
except pywikibot.IsRedirectPage, arg:
redirTarget = pywikibot.Page(self.site, arg.args[0])
pywikibot.output(u"WARNING: Page %s is a redirect to %s; skipping."
% (page.title(asLink=True),
redirTarget.title(asLink=True)))
else:
return text
return None
def save(self, text, page, comment, minorEdit=True, botflag=True):
# only save if something was changed
if text != page.get():
# show what was changed
pywikibot.showDiff(page.get(), text)
pywikibot.output(u'Comment: %s' %comment)
if not self.dry:
if not self.always:
confirm = 'y'
while True:
choice = pywikibot.inputChoice(
u'Do you want to accept these changes?',
['Yes', 'No', 'Always'], ['y', 'N', 'a'], 'N')
if choice == 'a':
confirm = pywikibot.inputChoice(u"""\
This should be used if and only if you are sure that your links are correct!
Are you sure?""", ['Yes', 'No'], ['y', 'n'], 'n')
if confirm == 'y':
self.always = True
break
else: break
if self.always or choice == 'y':
try:
# Save the page
page.put(text, comment=comment,
minorEdit=minorEdit, botflag=botflag)
except pywikibot.LockedPage:
pywikibot.output(u"Page %s is locked; skipping."
% page.title(asLink=True))
except pywikibot.EditConflict:
pywikibot.output(
u'Skipping %s because of edit conflict'
% (page.title()))
except pywikibot.SpamfilterError, error:
pywikibot.output(
u'Cannot change %s because of spam blacklist entry %s'
% (page.title(), error.url))
else:
return True
return False
def treat(self, page):
text = self.load(page)
if text is None:
return
cats = [c for c in page.categories()]
# Show the title of the page we're working on.
# Highlight the title in purple.
pywikibot.output(
u"\n\n>>> \03{lightpurple}%s\03{default} <<<"
% page.title())
pywikibot.output(u"Current categories:")
for cat in cats:
pywikibot.output(u"* %s" % cat.title())
catpl = pywikibot.Page(self.site, self.newcatTitle, defaultNamespace=14)
if catpl in cats:
pywikibot.output(u"%s is already in %s."
% (page.title(), catpl.title()))
else:
if self.sort:
catpl = self.sorted_by_last_name(catpl, page)
pywikibot.output(u'Adding %s' % catpl.title(asLink=True))
cats.append(catpl)
text = pywikibot.replaceCategoryLinks(text, cats)
if not self.save(text, page, self.editSummary):
pywikibot.output(u'Page %s not saved.'
% page.title(asLink=True))
class CategoryMoveRobot:
"""Robot to move pages from one category to another."""
def __init__(self, oldCatTitle, newCatTitle, batchMode=False,
editSummary='', inPlace=False, moveCatPage=True,
deleteEmptySourceCat=True, titleRegex=None,
useSummaryForDeletion=True):
site = pywikibot.getSite()
self.editSummary = editSummary
self.oldCat = catlib.Category(pywikibot.Link('Category:' + oldCatTitle))
self.newCatTitle = newCatTitle
self.inPlace = inPlace
self.moveCatPage = moveCatPage
self.batchMode = batchMode
self.deleteEmptySourceCat = deleteEmptySourceCat
self.titleRegex = titleRegex
self.useSummaryForDeletion = useSummaryForDeletion
def run(self):
site = pywikibot.getSite()
newCat = catlib.Category(pywikibot.Link('Category:' + self.newCatTitle))
newcat_contents = set(newCat.members())
# set edit summary message
if not self.editSummary:
self.editSummary = i18n.twtranslate(site, 'category-replacing',\
{'oldcat':self.oldCat.title(),
'newcat':newCat.title()})
# Copy the category contents to the new category page
copied = False
oldMovedTalk = None
if self.oldCat.exists() and self.moveCatPage:
copied = self.oldCat.copyAndKeep(
self.newCatTitle,
pywikibot.translate(site, cfd_templates),
i18n.twtranslate(site, 'category-renamed')
)
# Also move the talk page
if copied:
reason = i18n.twtranslate(site, 'category-was-moved', \
{'newcat':self.newCatTitle, 'title':self.newCatTitle})
oldTalk = self.oldCat.toggleTalkPage()
if oldTalk.exists():
newTalkTitle = newCat.toggleTalkPage().title()
try:
talkMoved = oldTalk.move(newTalkTitle, reason)
except (pywikibot.NoPage, pywikibot.PageNotSaved), e:
# In order: source talk page does not exist, or
# target talk page already exists
pywikibot.output(e.message)
else:
if talkMoved:
oldMovedTalk = oldTalk
# Move articles
gen = pagegenerators.CategorizedPageGenerator(self.oldCat,
recurse=False)
preloadingGen = pagegenerators.PreloadingGenerator(gen)
for article in preloadingGen:
if not self.titleRegex or re.search(self.titleRegex,
article.title()):
if article in newcat_contents:
catlib.change_category(article, self.oldCat, None,
comment=self.editSummary,
inPlace=self.inPlace)
else:
catlib.change_category(article, self.oldCat, newCat,
comment=self.editSummary,
inPlace=self.inPlace)
# Move subcategories
gen = pagegenerators.SubCategoriesPageGenerator(self.oldCat,
recurse=False)
preloadingGen = pagegenerators.PreloadingGenerator(gen)
for subcategory in preloadingGen:
if not self.titleRegex or re.search(self.titleRegex,
subcategory.title()):
if subcategory in newcat_contents:
catlib.change_category(subcategory, self.oldCat, None,
comment=self.editSummary,
inPlace=self.inPlace)
else:
catlib.change_category(subcategory, self.oldCat, newCat,
comment=self.editSummary,
inPlace=self.inPlace)
# Delete the old category and its moved talk page
if copied and self.deleteEmptySourceCat == True:
if self.oldCat.isEmptyCategory():
reason = i18n.twtranslate(site, 'category-was-moved', \
{'newcat': self.newCatTitle, 'title': self.newCatTitle})
confirm = not self.batchMode
self.oldCat.delete(reason, confirm, mark = True)
if oldMovedTalk is not None:
oldMovedTalk.delete(reason, confirm, mark = True)
else:
pywikibot.output('Couldn\'t delete %s - not empty.'
% self.oldCat.title())
class CategoryListifyRobot:
'''Creates a list containing all of the members in a category.'''
def __init__(self, catTitle, listTitle, editSummary, overwrite = False, showImages = False, subCats = False, talkPages = False, recurse = False):
self.editSummary = editSummary
self.overwrite = overwrite
self.showImages = showImages
self.site = pywikibot.getSite()
self.cat = catlib.Category(pywikibot.Link('Category:' + catTitle))
self.list = pywikibot.Page(self.site, listTitle)
self.subCats = subCats
self.talkPages = talkPages
self.recurse = recurse
def run(self):
setOfArticles = set(self.cat.articles(recurse = self.recurse))
if self.subCats:
setOfArticles = setOfArticles.union(set(self.cat.subcategories()))
if not self.editSummary:
self.editSummary = i18n.twtranslate(self.site,
'category-listifying',
{'fromcat': self.cat.title(),
'num': len(setOfArticles)})
listString = ""
for article in setOfArticles:
if (not article.isImage() or self.showImages) and not article.isCategory():
if self.talkPages and not article.isTalkPage():
listString = listString + "*[[%s]] -- [[%s|talk]]\n" % (article.title(), article.toggleTalkPage().title())
else:
listString = listString + "*[[%s]]\n" % article.title()
else:
if self.talkPages and not article.isTalkPage():
listString = listString + "*[[:%s]] -- [[%s|talk]]\n" % (article.title(), article.toggleTalkPage().title())
else:
listString = listString + "*[[:%s]]\n" % article.title()
if self.list.exists() and not self.overwrite:
pywikibot.output(u'Page %s already exists, aborting.' % self.list.title())
else:
self.list.put(listString, comment=self.editSummary)
class CategoryRemoveRobot:
'''Removes the category tag from all pages in a given category and, if the
pagesonly parameter is False, also from the category pages of all its
subcategories, without prompting. If the category is left empty, it will be
tagged for deletion. Does not remove category tags pointing at
subcategories.
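Illustrative use (the category name is a placeholder):
bot = CategoryRemoveRobot('Some obsolete category', batchMode=True)
bot.run()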
'''
def __init__(self, catTitle, batchMode=False, editSummary='',
useSummaryForDeletion=True, titleRegex=None, inPlace=False,
pagesonly=False):
self.editSummary = editSummary
self.site = pywikibot.getSite()
self.cat = catlib.Category(pywikibot.Link('Category:' + catTitle))
# get edit summary message
self.useSummaryForDeletion = useSummaryForDeletion
self.batchMode = batchMode
self.titleRegex = titleRegex
self.inPlace = inPlace
self.pagesonly = pagesonly
if not self.editSummary:
self.editSummary = i18n.twtranslate(self.site, 'category-removing',
{'oldcat': self.cat.title()})
def run(self):
articles = set(self.cat.articles())
if len(articles) == 0:
pywikibot.output(u'There are no articles in category %s' % self.cat.title())
else:
for article in articles:
if not self.titleRegex or re.search(self.titleRegex,article.title()):
catlib.change_category(article, self.cat, None, comment = self.editSummary, inPlace = self.inPlace)
if self.pagesonly:
return
# Also removes the category tag from subcategories' pages
subcategories = set(self.cat.subcategories())
if len(subcategories) == 0:
pywikibot.output(u'There are no subcategories in category %s' % self.cat.title())
else:
for subcategory in subcategories:
catlib.change_category(subcategory, self.cat, None, comment = self.editSummary, inPlace = self.inPlace)
# Deletes the category page
if self.cat.exists() and self.cat.isEmptyCategory():
if self.useSummaryForDeletion and self.editSummary:
reason = self.editSummary
else:
reason = i18n.twtranslate(self.site, 'category-was-disbanded')
talkPage = self.cat.toggleTalkPage()
try:
self.cat.delete(reason, not self.batchMode)
except pywikibot.NoUsername:
pywikibot.output(u'No sysop credentials are set up for %s; the category page will not be deleted.' % self.cat.site())
return
if (talkPage.exists()):
talkPage.delete(reason=reason, prompt=not self.batchMode)
class CategoryTidyRobot:
"""Script to help a human to tidy up a category by moving its articles into
subcategories
Specify the category name on the command line. The program will pick up the
page, and look for all subcategories and supercategories, and show them with
a number adjacent to them. It will then automatically loop over all pages
in the category. It will ask you to type the number of the appropriate
replacement, and perform the change robotically.
If you don't want to move the article to a subcategory or supercategory, but to
another category, you can use the 'j' (jump) command.
Typing 's' will leave the complete page unchanged.
Typing '?' will show you the first few bytes of the current page, helping
you to find out what the article is about and in which other categories it
currently is.
Important:
* this bot is written to work with the MonoBook skin, so make sure your bot
account uses this skin
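Illustrative programmatic use (the category name is a placeholder):
bot = CategoryTidyRobot('Some category to tidy', CategoryDatabase())
bot.run()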
"""
def __init__(self, catTitle, catDB):
self.catTitle = catTitle
self.catDB = catDB
self.site = pywikibot.getSite()
self.editSummary = i18n.twtranslate(self.site, 'category-changing')\
% {'oldcat':self.catTitle, 'newcat':u''}
def move_to_category(self, article, original_cat, current_cat):
'''
Given an article which is in category original_cat, ask the user if
it should be moved to one of original_cat's subcategories.
Recursively run through subcategories' subcategories.
NOTE: current_cat is only used for internal recursion. You should
always use current_cat = original_cat.
'''
pywikibot.output(u'')
# Show the title of the page where the link was found.
# Highlight the title in purple.
pywikibot.output(u'Treating page \03{lightpurple}%s\03{default}, currently in \03{lightpurple}%s\03{default}' % (article.title(), current_cat.title()))
# Determine a reasonable amount of context to print
try:
full_text = article.get(get_redirect = True)
except pywikibot.NoPage:
pywikibot.output(u'Page %s not found.' % article.title())
return
try:
contextLength = full_text.index('\n\n')
except ValueError: # substring not found
contextLength = 500
if full_text.startswith(u'[['): # probably an image
# Add extra paragraph.
contextLength = full_text.find('\n\n', contextLength+2)
if contextLength > 1000 or contextLength < 0:
contextLength = 500
print
pywikibot.output(full_text[:contextLength])
print
subcatlist = self.catDB.getSubcats(current_cat)
supercatlist = self.catDB.getSupercats(current_cat)
print
if len(subcatlist) == 0:
print 'This category has no subcategories.'
print
if len(supercatlist) == 0:
print 'This category has no supercategories.'
print
# show subcategories as possible choices (with numbers)
for i in range(len(supercatlist)):
# layout: we don't expect a cat to have more than 10 supercats
pywikibot.output(u'u%d - Move up to %s' % (i, supercatlist[i].title()))
for i in range(len(subcatlist)):
# layout: we don't expect a cat to have more than 100 subcats
pywikibot.output(u'%2d - Move down to %s' % (i, subcatlist[i].title()))
print ' j - Jump to another category'
print ' s - Skip this article'
print ' r - Remove this category tag'
print ' ? - Print first part of the page (longer and longer)'
pywikibot.output(u'Enter - Save category as %s' % current_cat.title())
flag = False
while not flag:
print ''
choice = pywikibot.input(u'Choice:')
if choice in ['s', 'S']:
flag = True
elif choice == '':
pywikibot.output(u'Saving category as %s' % current_cat.title())
if current_cat == original_cat:
print 'No changes necessary.'
else:
catlib.change_category(article, original_cat, current_cat, comment = self.editSummary)
flag = True
elif choice in ['j', 'J']:
newCatTitle = pywikibot.input(u'Please enter the category the article should be moved to:')
newCat = catlib.Category(pywikibot.Link('Category:' + newCatTitle))
# recurse into chosen category
self.move_to_category(article, original_cat, newCat)
flag = True
elif choice in ['r', 'R']:
# remove the category tag
catlib.change_category(article, original_cat, None, comment = self.editSummary)
flag = True
elif choice == '?':
contextLength += 500
print
pywikibot.output(full_text[:contextLength])
print
# if categories possibly weren't visible, show them additionally
# (maybe this should always be shown?)
if len(full_text) > contextLength:
print ''
print 'Original categories: '
for cat in article.categories():
pywikibot.output(u'* %s' % cat.title())
elif choice[0] == 'u':
try:
choice=int(choice[1:])
except ValueError:
# user pressed an unknown command. Prompt him again.
continue
self.move_to_category(article, original_cat, supercatlist[choice])
flag = True
else:
try:
choice=int(choice)
except ValueError:
# user pressed an unknown command. Prompt him again.
continue
# recurse into subcategory
self.move_to_category(article, original_cat, subcatlist[choice])
flag = True
def run(self):
cat = catlib.Category(pywikibot.Link('Category:' + self.catTitle))
articles = set(cat.articles())
if len(articles) == 0:
pywikibot.output(u'There are no articles in category ' + self.catTitle)
else:
preloadingGen = pagegenerators.PreloadingGenerator(iter(articles))
for article in preloadingGen:
pywikibot.output('')
pywikibot.output(u'=' * 67)
self.move_to_category(article, cat, cat)
class CategoryTreeRobot:
'''
Robot to create tree overviews of the category structure.
Parameters:
* catTitle - The category which will be the tree's root.
* catDB - A CategoryDatabase object
* maxDepth - The limit beyond which no subcategories will be listed.
This also guarantees that loops in the category structure
won't be a problem.
* filename - The textfile where the tree should be saved; None to print
the tree to stdout.
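Illustrative use (category name and depth are placeholders):
bot = CategoryTreeRobot('Some category', CategoryDatabase(), maxDepth=2)
bot.run()   # prints the tree to stdout because no filename was given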
'''
def __init__(self, catTitle, catDB, filename = None, maxDepth = 10):
self.catTitle = catTitle
self.catDB = catDB
if filename and not os.path.isabs(filename):
filename = config.datafilepath(filename)
self.filename = filename
# TODO: make maxDepth changeable with a parameter or config file entry
self.maxDepth = maxDepth
self.site = pywikibot.getSite()
def treeview(self, cat, currentDepth = 0, parent = None):
'''
Returns a multi-line string which contains a tree view of all subcategories
of cat, up to level maxDepth. Recursively calls itself.
Parameters:
* cat - the Category of the node we're currently opening
* currentDepth - the current level in the tree (for recursion)
* parent - the Category of the category we're coming from
'''
result = u'#' * currentDepth
result += '[[:%s|%s]]' % (cat.title(), cat.title().split(':', 1)[1])
result += ' (%d)' % len(self.catDB.getArticles(cat))
# We will remove an element of this array, but will need the original array
# later, so we create a shallow copy with [:]
supercats = self.catDB.getSupercats(cat)[:]
# Find out which other cats are supercats of the current cat
try:
supercats.remove(parent)
except:
pass
if supercats != []:
supercat_names = []
for i in range(len(supercats)):
# create a list of wiki links to the supercategories
supercat_names.append('[[:%s|%s]]' % (supercats[i].title(), supercats[i].title().split(':', 1)[1]))
# print this list, separated with commas, using translations given in also_in_cats
result += ' ' + i18n.twtranslate(self.site, 'category-also-in',
{'alsocat': ', '.join(supercat_names)})
result += '\n'
if currentDepth < self.maxDepth:
for subcat in self.catDB.getSubcats(cat):
# recurse into subdirectories
result += self.treeview(subcat, currentDepth + 1, parent = cat)
else:
if self.catDB.getSubcats(cat) != []:
# show that there are more categories beyond the depth limit
result += '#' * (currentDepth + 1) + '[...]\n'
return result
def run(self):
"""Prints the multi-line string generated by treeview or saves it to a
file.
Parameters:
* catTitle - the title of the category which will be the tree's root
* maxDepth - the limit beyond which no subcategories will be listed
"""
cat = catlib.Category(pywikibot.Link('Category:' + self.catTitle))
tree = self.treeview(cat)
if self.filename:
pywikibot.output(u'Saving results in %s' % self.filename)
import codecs
f = codecs.open(self.filename, 'a', 'utf-8')
f.write(tree)
f.close()
else:
pywikibot.output(tree, toStdout = True)
def main(*args):
global catDB
fromGiven = False
toGiven = False
batchMode = False
editSummary = ''
inPlace = False
overwrite = False
showImages = False
talkPages = False
recurse = False
titleRegex = None
pagesonly = False
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine on which pages
# to work on.
genFactory = pagegenerators.GeneratorFactory()
# The generator gives the pages that should be worked upon.
gen = None
# If this is set to true then the custom edit summary given for removing
# categories from articles will also be used as the deletion reason.
useSummaryForDeletion = True
catDB = CategoryDatabase()
action = None
sort_by_last_name = False
restore = False
create_pages = False
for arg in pywikibot.handleArgs(*args):
if arg == 'add':
action = 'add'
elif arg == 'remove':
action = 'remove'
elif arg == 'move':
action = 'move'
elif arg == 'tidy':
action = 'tidy'
elif arg == 'tree':
action = 'tree'
elif arg == 'listify':
action = 'listify'
elif arg == '-person':
sort_by_last_name = True
elif arg == '-rebuild':
catDB.rebuild()
elif arg.startswith('-from:'):
oldCatTitle = arg[len('-from:'):].replace('_', ' ')
fromGiven = True
elif arg.startswith('-to:'):
newCatTitle = arg[len('-to:'):].replace('_', ' ')
toGiven = True
elif arg == '-batch':
batchMode = True
elif arg == '-inplace':
inPlace = True
elif arg == '-nodelsum':
useSummaryForDeletion = False
elif arg == '-overwrite':
overwrite = True
elif arg == '-showimages':
showImages = True
elif arg.startswith('-summary:'):
editSummary = arg[len('-summary:'):]
elif arg.startswith('-match'):
if len(arg) == len('-match'):
titleRegex = pywikibot.input(
u'Which regular expression should affected objects match?')
else:
titleRegex = arg[len('-match:'):]
elif arg == '-talkpages':
talkPages = True
elif arg == '-recurse':
recurse = True
elif arg == '-pagesonly':
pagesonly = True
elif arg == '-create':
create_pages = True
else:
genFactory.handleArg(arg)
pywikibot.Site().login()
gen = genFactory.getCombinedGenerator()
if action == 'add':
if not gen:
# default for backwards compatibility
genFactory.handleArg('-links')
gen = genFactory.getCombinedGenerator()
# The preloading generator is responsible for downloading multiple
# pages from the wiki simultaneously.
gen = pagegenerators.PreloadingGenerator(gen)
bot = AddCategory(gen, sort_by_last_name, create_pages, editSummary)
bot.run()
elif action == 'remove':
if (fromGiven == False):
oldCatTitle = pywikibot.input(
u'Please enter the name of the category that should be removed:')
bot = CategoryRemoveRobot(oldCatTitle, batchMode, editSummary,
useSummaryForDeletion, inPlace=inPlace,
pagesonly=pagesonly)
bot.run()
elif action == 'move':
if (fromGiven == False):
oldCatTitle = pywikibot.input(
u'Please enter the old name of the category:')
if (toGiven == False):
newCatTitle = pywikibot.input(
u'Please enter the new name of the category:')
bot = CategoryMoveRobot(oldCatTitle, newCatTitle, batchMode,
editSummary, inPlace, titleRegex=titleRegex)
bot.run()
elif action == 'tidy':
catTitle = pywikibot.input(u'Which category do you want to tidy up?')
bot = CategoryTidyRobot(catTitle, catDB)
bot.run()
elif action == 'tree':
catTitle = pywikibot.input(
u'For which category do you want to create a tree view?')
filename = pywikibot.input(
u'Please enter the name of the file where the tree should be saved,\n'
u'or press enter to simply show the tree:')
bot = CategoryTreeRobot(catTitle, catDB, filename)
bot.run()
elif action == 'listify':
if (fromGiven == False):
oldCatTitle = pywikibot.input(
u'Please enter the name of the category to listify:')
if (toGiven == False):
newCatTitle = pywikibot.input(
u'Please enter the name of the list to create:')
bot = CategoryListifyRobot(oldCatTitle, newCatTitle, editSummary,
overwrite, showImages, subCats=True,
talkPages=talkPages, recurse=recurse)
bot.run()
else:
pywikibot.showHelp('category')
if __name__ == "__main__":
try:
main()
except pywikibot.Error:
pywikibot.error("Fatal error:", exc_info=True)
finally:
catDB.dump()
pywikibot.stopme()
| {
"content_hash": "e8e89db57448ae81a9c34615f790844b",
"timestamp": "",
"source": "github",
"line_count": 960,
"max_line_length": 159,
"avg_line_length": 42.030208333333334,
"alnum_prop": 0.564400604723785,
"repo_name": "pywikibot/core-migration-example",
"id": "be2ea117bcab68888ed99ea4fc65797f7ebc3996",
"size": "40408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/category.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2685207"
}
],
"symlink_target": ""
} |
from module.duplicates import search_duplicates
from pprint import pprint
import argparse
import os
def r_dir(prospective_dir):
if not os.path.isdir(prospective_dir):
raise Exception("readable_dir:{0} is not a valid path".format(prospective_dir))
if not os.access(prospective_dir, os.R_OK):
raise Exception("readable_dir:{0} is not a readable dir".format(prospective_dir))
return prospective_dir
parser = argparse.ArgumentParser(description='A set of tools to facilitate the work with the disk.')
parser.add_argument('command', help='Command to execute', choices=['duplicates'])
parser.add_argument('-t', '--target', help='Target directory', type=r_dir, metavar='DIRECTORY', default=os.getcwd())
parser.add_argument('-v', '--verbose', help='Switch to verbose mode', action='store_true')
args = parser.parse_args()
{
'duplicates': lambda: pprint(search_duplicates(args.target, args))
}[args.command]() | {
"content_hash": "872c9b8cba6e5ad43f3ed70529441f0d",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 116,
"avg_line_length": 36.15384615384615,
"alnum_prop": 0.7244680851063829,
"repo_name": "aiskov/storekeeper",
"id": "b8b85b0cfb68b9de79770db2e067ed7105f2765a",
"size": "940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storekeeper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2000"
}
],
"symlink_target": ""
} |
def test_stub():
pass
| {
"content_hash": "dce710a2107b0f59f6398892f350d473",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 16,
"avg_line_length": 13,
"alnum_prop": 0.5769230769230769,
"repo_name": "avanov/Presentations",
"id": "7e3cbdd1a5c8d0a48158dbf576f11fffbefd9a7e",
"size": "26",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5881"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import os, yaml
def pytest_addoption(parser):
sibis = parser.getgroup('sibis', description='SIBIS specific test options', after='usage')
sibis.addoption("--config-file", action="store", default=os.path.expanduser("~/.sibis-general-config.yml"),
help="Path to SIBIS General Configuration File")
sibis.addoption("--cluster-job", action="store_true", default=False, help="Running as cluster job")
sibis.addoption("--enable-special", nargs="*", choices=['sample_session', 'default_general_config'])
def pytest_generate_tests(metafunc):
option_value = metafunc.config.option.config_file
print("opt_val: >{0}<".format(option_value))
with open(option_value, 'r') as f:
general_cfg = yaml.safe_load(f)
if 'config' in metafunc.fixturenames and general_cfg is not None:
print("general_config: " + repr(general_cfg))
metafunc.parametrize("config", [general_cfg])
if 'config_file' in metafunc.fixturenames and general_cfg is not None:
metafunc.parametrize("config_file", [option_value])
option_value = metafunc.config.option.cluster_job
if 'cluster_job' in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("cluster_job", [option_value])
special_opts = metafunc.config.option.enable_special or ['none']
if 'special_opts' in metafunc.fixturenames and special_opts is not None:
metafunc.parametrize('special_opts', special_opts)
def pytest_configure(config):
config.addinivalue_line(
"markers", "cluster_job(enabled): marks tests as cluster enabled. Run these tests with --cluster-job option."
)
| {
"content_hash": "267c62baab8ad0d281af9a1307b23041",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 117,
"avg_line_length": 45.21052631578947,
"alnum_prop": 0.6915017462165308,
"repo_name": "sibis-platform/sibis",
"id": "c8d6ec3a5ddf7956858eb2e8ae8f3dfa5952d95b",
"size": "1718",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21616"
},
{
"name": "Shell",
"bytes": "572"
}
],
"symlink_target": ""
} |
"""
Defines an explorer that performs a general inspection of WBEM servers
to determine information such as namespaces, brand, version, and profiles.
"""
from __future__ import print_function, absolute_import
import traceback
import time
from collections import namedtuple
from urlparse import urlparse
import threading
from pywbem import WBEMConnection, WBEMServer, ValueMapping, Error, \
ConnectionError, TimeoutError, AuthError
from ._ping import ping_host
from .config import PING_TIMEOUT, DEFAULT_USERNAME, DEFAULT_PASSWORD
from ._logging import get_logger, SmiPypingLoggers, logged_api_call, \
EXPLORE_LOGGER_NAME, SMIPYPING_LOGGER_NAME
__all__ = ['Explorer', ]
LOG = get_logger(__name__)
SMIPYPING_LOG = get_logger(SMIPYPING_LOGGER_NAME)
# named tuple for information about opened servers.
ServerInfoTuple = namedtuple('ServerInfoTuple',
['url', 'server', 'target_id', 'status', 'time'])
RESULTS = []
class Explorer(object):
"""
The Explorer class provides a general capability to explore providers
defined in a database including getting information on server branding,
namespaces, interop namespaces, profiles, etc.
It is designed to explore a number of servers, provide a report of the
results, and log information about each individual WBEM server.
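A minimal usage sketch (illustrative; 'targets_tbl' is assumed to be a
populated targets table and 'target_ids' a list of its target ids):
explorer = Explorer('explore', targets_tbl, threaded=True)
servers = explorer.explore_servers(target_ids)
Each entry in 'servers' is a ServerInfoTuple(url, server, target_id, status, time).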
"""
def __init__(self, prog, targets_tbl, logfile=None, log_level=None,
debug=None, ping=None, verbose=None, threaded=False,
output_format='simple'):
"""Initialize instance attributes."""
self.verbose = verbose
self.ping = ping
self.targets_tbl = targets_tbl
self.timeout = None
self.prog = prog
self.debug = debug
self.threaded = threaded
self.explore_time = None
self.output_format = output_format
log_dest = 'file' if log_level else None
SmiPypingLoggers.create_logger(log_component='explore',
log_dest=log_dest,
log_filename=logfile,
log_level=log_level)
self.logger = get_logger(EXPLORE_LOGGER_NAME)
def explore_servers(self, target_list):
"""
Explore the basic characteristics of a list of servers including
existence, branding, etc.
Parameters:
target_list - List of target_ids to explore
Returns:
List of namedtuple ServerInfoTuple representing the results of
the explore
"""
self.explore_time = time.time()
if self.threaded:
servers = self.explore_threaded(target_list)
else:
servers = self.explore_non_threaded(target_list)
self.explore_time = time.time() - self.explore_time
self.logger.info('Explore Threaded=%s time=%.2f s', self.threaded,
self.explore_time)
return servers
@logged_api_call
def explore_non_threaded(self, target_list):
"""Explore a the list of targets without using threading"""
servers = []
# #### TODO move this all back to IDs and stop mapping host to id.
for target_id in target_list:
target = self.targets_tbl[target_id]
# get variables for the connection and logs
            url = self.targets_tbl.build_url(target_id)
credential = target['Credential']
principal = target['Principal']
product = target['Product']
company_name = target['CompanyName']
log_info = 'id=%s Url=%s Product=%s Company=%s' % (target_id, url,
product,
company_name)
# TODO too much swapping between entities.
# TODO need a target class since this goes back to top dict to
# get info
if self.targets_tbl.disabled_target(target):
s = ServerInfoTuple(url=url, server=None, status='DISABLE',
target_id=target_id, time=0)
servers.append(s)
self.logger.info('Disabled %s ', log_info)
else:
svr_tuple = self.explore_server(url, target, principal,
credential)
servers.append(svr_tuple)
return servers
@logged_api_call
def explore_threaded(self, target_list):
"""
Threaded scan of IP Addresses for open ports.
Scan the IP address defined by the input and return a list of open
IP addresses. This function creates multiple processes and executes
each call in a process for speed.
"""
servers = []
# #### TODO move this all back to IDs and stop mapping host to id.
threads_ = []
for target_id in target_list:
target = self.targets_tbl[target_id]
# get variables for the connection and logs
url = '%s://%s' % (target['Protocol'], target['IPAddress'])
credential = target['Credential']
principal = target['Principal']
product = target['Product']
company_name = target['CompanyName']
log_info = 'id=%s Url=%s Product=%s Company=%s' % (target_id, url,
product,
company_name)
# TODO too much swapping between entities.
# TODO need a target class since this goes back to top dict to
# get info
if self.targets_tbl.disabled_target(target):
s = ServerInfoTuple(url=url, server=None, status='DISABLE',
target_id=target_id, time=0)
servers.append(s)
self.logger.info('Disabled %s ', log_info)
else:
process = threading.Thread(target=self.explore_server,
args=(url, target, principal,
credential))
threads_.append(process)
for process in threads_:
process.start()
for process in threads_:
process.join()
for result in RESULTS:
servers.append(result)
return servers
@logged_api_call
def explore_server(self, url, target, principal, credential):
""" Explore a cim server for characteristics defined by
the server class including namespaces, brand, version, etc. info.
Return: The ServerInfoTuple object
"""
cmd_time = 0
start_time = time.time() # Scan start time
target_id = target['TargetID']
principal = target.get('Principal', DEFAULT_USERNAME)
credential = target.get('Credential', DEFAULT_PASSWORD)
log_info = 'id=%s Url=%s Product=%s Company=%s' \
% (target['TargetID'], url,
target['Product'],
target['CompanyName'])
svr_tuple = None
if self.ping:
ping_result = self.ping_server(url, self.verbose)
if ping_result is False:
cmd_time = time.time() - start_time
self.logger.error('PING_FAIL %s time %.2f s', log_info,
cmd_time)
svr_tuple = ServerInfoTuple(url=url, server=None,
status='PING_FAIL',
target_id=target_id,
time=cmd_time)
RESULTS.append(svr_tuple)
return svr_tuple
try:
self.logger.info('Open %s', log_info)
conn = WBEMConnection(url, (principal, credential),
no_verification=True, timeout=20)
server = WBEMServer(conn)
# Access the server since the creation of the connection
# and server constructors do not actually contact the
# WBEM server
if self.verbose:
print('Brand:%s, Version:%s, Interop namespace:%s' %
(server.brand, server.version, server.interop_ns))
print("All namespaces: %s" % server.namespaces)
else:
# force access to test server connection
_ = server.interop_ns # noqa: F841
_ = server.profiles # noqa: F841
cmd_time = time.time() - start_time
svr_tuple = ServerInfoTuple(url=url, server=server,
target_id=target_id,
status='OK',
time=cmd_time)
self.logger.info('OK %s time %.2f s', log_info, cmd_time)
except ConnectionError as ce:
cmd_time = time.time() - start_time
self.logger.error('ConnectionError exception:%s %s time %.2f s',
ce, log_info, cmd_time)
err = 'ConnErr'
svr_tuple = ServerInfoTuple(url, server, target_id, err,
cmd_time)
traceback.format_exc()
except TimeoutError as to:
cmd_time = time.time() - start_time
self.logger.error('Pywbem Client Timeout Error exception:%s %s '
'time %.2f s', to, log_info, cmd_time)
err = 'Timeout'
svr_tuple = ServerInfoTuple(url, server, target_id, err,
cmd_time)
traceback.format_exc()
except AuthError as ae:
cmd_time = time.time() - start_time
self.logger.error('PyWBEM AuthEr exception:%s %s time %.2f s',
ae, log_info, cmd_time)
err = 'AuthErr'
svr_tuple = ServerInfoTuple(url, server, target_id, err,
cmd_time)
traceback.format_exc()
except Error as er:
cmd_time = time.time() - start_time
self.logger.error('PyWBEM Error exception:%s %s time %.2f s',
er, log_info, cmd_time)
err = 'PyWBMEr'
svr_tuple = ServerInfoTuple(url, server, target_id, err,
cmd_time)
traceback.format_exc()
except Exception as ex: # pylint: disable=broad-except
cmd_time = time.time() - start_time
self.logger.error('General Error: exception:%s %s time %.2f s',
ex, log_info, cmd_time)
err = 'GenErr'
svr_tuple = ServerInfoTuple(url, server, target_id, err,
cmd_time)
traceback.format_exc()
RESULTS.append(svr_tuple)
return svr_tuple
def ping_server(self, url, verbose): # pylint: disable=no-self-use
"""
        Get the netloc (host) from the url and ping that host.
        Returns the result from ping_host(), indicating whether the host
        responded within PING_TIMEOUT.
"""
netloc = urlparse(url).netloc
target_address = netloc.split(':')
result = ping_host(target_address[0], PING_TIMEOUT)
if verbose:
print('Ping host=%s, result=%s' % (target_address[0], result))
return result
def explore_server_profiles(self, server, args, short_explore=True):
"""
Explore the registered profiles and generate an output table of
the profiles for the defined server.
"""
def print_profile_info(org_vm, inst):
"""
Print the registered org, name, version for the profile
defined by inst
"""
org = org_vm.tovalues(inst['RegisteredOrganization'])
name = inst['RegisteredName']
vers = inst['RegisteredVersion']
if args.verbose:
print(" %s %s Profile %s" % (org, name, vers))
org_vm = ValueMapping.for_property(server, server.interop_ns,
'CIM_RegisteredProfile',
'RegisteredOrganization')
if short_explore:
return server
try:
indication_profiles = server.get_selected_profiles(
registered_org='DMTF',
registered_name='Indications')
except TypeError as te:
SMIPYPING_LOG.error('Get_selected_profile failed for url %s, '
'Exception %s, traceback=',
server.conn.url, te, exc_info=True)
            raise TypeError('Profile acquisition failed looking for profiles '
                            'org=DMTF, Name=Indications in url %s' %
                            server.conn.url)
self.logger.info('Profiles for DMTF:Indications')
for inst in indication_profiles:
print_profile_info(org_vm, inst)
server_profiles = server.get_selected_profiles(registered_org='SNIA')
self.logger.info('SNIA Profiles:')
for inst in server_profiles:
print_profile_info(org_vm, inst)
# get Central Instances
for inst in indication_profiles:
org = org_vm.tovalues(inst['RegisteredOrganization'])
name = inst['RegisteredName']
vers = inst['RegisteredVersion']
self.logger.info(
"Central instances for profile %s:%s:%s (component):",
org, name, vers)
try:
ci_paths = server.get_central_instances(
inst.path,
"CIM_IndicationService", "CIM_System",
["CIM_HostedService"])
except Exception as exc: # pylint: disable=broad-except
self.logger.error("Error: Central Instances%s", str(exc))
ci_paths = []
for ip in ci_paths:
self.logger.error(" %s", str(ip))
for inst in server_profiles:
org = org_vm.tovalues(inst['RegisteredOrganization'])
name = inst['RegisteredName']
vers = inst['RegisteredVersion']
self.logger.info(
"Central instances for profile %s:%s:%s(autonomous):",
org, name, vers)
try:
ci_paths = server.get_central_instances(inst.path)
except Exception as exc: # pylint: disable=broad-except
print("Exception: %s" % str(exc))
ci_paths = []
for ip in ci_paths:
self.logger.info(" %s", str(ip))
return server
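# --- Hedged usage sketch (added for illustration; not part of smipyping) ---
# ``targets_tbl`` stands in for whatever targets-table object the calling
# application provides (it must support item access, disabled_target(), etc.),
# and the target ids below are hypothetical.
#
#     explorer = Explorer('smicli', targets_tbl, logfile='explore.log',
#                         log_level='debug', ping=True, threaded=True)
#     results = explorer.explore_servers([4, 9, 17])
#     for info in results:
#         print(info.url, info.status, info.time)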
| {
"content_hash": "3e9b8f27f02c85be7b44bfd76a5aac52",
"timestamp": "",
"source": "github",
"line_count": 376,
"max_line_length": 78,
"avg_line_length": 39.545212765957444,
"alnum_prop": 0.5307014594121999,
"repo_name": "KSchopmeyer/smipyping",
"id": "8b6926ea1642a6ddff8689468bbbe85a794a75d4",
"size": "15504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smipyping/_explore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "311"
},
{
"name": "Java",
"bytes": "3825"
},
{
"name": "Makefile",
"bytes": "14381"
},
{
"name": "Perl",
"bytes": "21056"
},
{
"name": "Python",
"bytes": "683098"
},
{
"name": "Shell",
"bytes": "13113"
},
{
"name": "TSQL",
"bytes": "5977898"
}
],
"symlink_target": ""
} |
"""Test for object db"""
import tempfile
import os
from gitdb.test.lib import TestBase
from gitdb.util import (
to_hex_sha,
to_bin_sha,
NULL_HEX_SHA,
LockedFD
)
class TestUtils(TestBase):
def test_basics(self):
assert to_hex_sha(NULL_HEX_SHA) == NULL_HEX_SHA
assert len(to_bin_sha(NULL_HEX_SHA)) == 20
assert to_hex_sha(to_bin_sha(NULL_HEX_SHA)) == NULL_HEX_SHA.encode("ascii")
def _cmp_contents(self, file_path, data):
# raise if data from file at file_path
# does not match data string
fp = open(file_path, "rb")
try:
assert fp.read() == data.encode("ascii")
finally:
fp.close()
def test_lockedfd(self):
my_file = tempfile.mktemp()
orig_data = "hello"
new_data = "world"
my_file_fp = open(my_file, "wb")
my_file_fp.write(orig_data.encode("ascii"))
my_file_fp.close()
try:
lfd = LockedFD(my_file)
lockfilepath = lfd._lockfilepath()
# cannot end before it was started
self.failUnlessRaises(AssertionError, lfd.rollback)
self.failUnlessRaises(AssertionError, lfd.commit)
# open for writing
assert not os.path.isfile(lockfilepath)
wfd = lfd.open(write=True)
assert lfd._fd is wfd
assert os.path.isfile(lockfilepath)
# write data and fail
os.write(wfd, new_data.encode("ascii"))
lfd.rollback()
assert lfd._fd is None
self._cmp_contents(my_file, orig_data)
assert not os.path.isfile(lockfilepath)
            # additional call doesn't fail
lfd.commit()
lfd.rollback()
# test reading
lfd = LockedFD(my_file)
rfd = lfd.open(write=False)
assert os.read(rfd, len(orig_data)) == orig_data.encode("ascii")
assert os.path.isfile(lockfilepath)
# deletion rolls back
del(lfd)
assert not os.path.isfile(lockfilepath)
# write data - concurrently
lfd = LockedFD(my_file)
olfd = LockedFD(my_file)
assert not os.path.isfile(lockfilepath)
wfdstream = lfd.open(write=True, stream=True) # this time as stream
assert os.path.isfile(lockfilepath)
# another one fails
self.failUnlessRaises(IOError, olfd.open)
wfdstream.write(new_data.encode("ascii"))
lfd.commit()
assert not os.path.isfile(lockfilepath)
self._cmp_contents(my_file, new_data)
# could test automatic _end_writing on destruction
finally:
os.remove(my_file)
# END final cleanup
# try non-existing file for reading
lfd = LockedFD(tempfile.mktemp())
try:
lfd.open(write=False)
except OSError:
assert not os.path.exists(lfd._lockfilepath())
else:
self.fail("expected OSError")
# END handle exceptions
| {
"content_hash": "9cf1d81899133ab1c89310727b16d949",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 85,
"avg_line_length": 31.12,
"alnum_prop": 0.5600899742930592,
"repo_name": "analurandis/Tur",
"id": "1dee544614ab2064844999eb5f909bbc7d90525a",
"size": "3322",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backend/venv/Lib/site-packages/gitdb/test/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "2277"
},
{
"name": "C",
"bytes": "433673"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "84779"
},
{
"name": "HTML",
"bytes": "340406"
},
{
"name": "JavaScript",
"bytes": "311663"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "14466829"
},
{
"name": "Shell",
"bytes": "3059"
},
{
"name": "TeX",
"bytes": "56837"
}
],
"symlink_target": ""
} |
from xml.dom import minidom
from xml.parsers.expat import ExpatError
class XmlEditor(object):
def __init__(self, xmlstring=""):
self.string = xmlstring
try:
self.tree = minidom.parseString(xmlstring)
except ExpatError as e:
raise ValueError(e)
def to_string(self):
return self.string | {
"content_hash": "9d278506144ff1384fe20e4a732d82d9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 54,
"avg_line_length": 26.846153846153847,
"alnum_prop": 0.6332378223495702,
"repo_name": "heltonbiker/MapComplete",
"id": "c9549ce7017fc3a4cfc3d3cc24b52e369e54f8e4",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "PyQt/xmlEdit/XmlEditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "5100"
},
{
"name": "HTML",
"bytes": "562"
},
{
"name": "Python",
"bytes": "135385"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from sympy.core import S, Add, Expr, Basic, Mul
from sympy.assumptions import Q, ask
def refine(expr, assumptions=True):
"""
Simplify an expression using assumptions.
Gives the form of expr that would be obtained if symbols
in it were replaced by explicit numerical expressions satisfying
the assumptions.
Examples
========
>>> from sympy import refine, sqrt, Q
>>> from sympy.abc import x
>>> refine(sqrt(x**2), Q.real(x))
Abs(x)
>>> refine(sqrt(x**2), Q.positive(x))
x
"""
if not isinstance(expr, Basic):
return expr
if not expr.is_Atom:
args = [refine(arg, assumptions) for arg in expr.args]
# TODO: this will probably not work with Integral or Polynomial
expr = expr.func(*args)
if hasattr(expr, '_eval_refine'):
ref_expr = expr._eval_refine(assumptions)
if ref_expr is not None:
return ref_expr
name = expr.__class__.__name__
handler = handlers_dict.get(name, None)
if handler is None:
return expr
new_expr = handler(expr, assumptions)
if (new_expr is None) or (expr == new_expr):
return expr
if not isinstance(new_expr, Expr):
return new_expr
return refine(new_expr, assumptions)
def refine_abs(expr, assumptions):
"""
Handler for the absolute value.
Examples
========
>>> from sympy import Symbol, Q, refine, Abs
>>> from sympy.assumptions.refine import refine_abs
>>> from sympy.abc import x
>>> refine_abs(Abs(x), Q.real(x))
>>> refine_abs(Abs(x), Q.positive(x))
x
>>> refine_abs(Abs(x), Q.negative(x))
-x
"""
from sympy.core.logic import fuzzy_not
from sympy import Abs
arg = expr.args[0]
if ask(Q.real(arg), assumptions) and \
fuzzy_not(ask(Q.negative(arg), assumptions)):
# if it's nonnegative
return arg
if ask(Q.negative(arg), assumptions):
return -arg
# arg is Mul
if isinstance(arg, Mul):
r = [refine(abs(a), assumptions) for a in arg.args]
non_abs = []
in_abs = []
for i in r:
if isinstance(i, Abs):
in_abs.append(i.args[0])
else:
non_abs.append(i)
return Mul(*non_abs) * Abs(Mul(*in_abs))
def refine_Pow(expr, assumptions):
"""
Handler for instances of Pow.
>>> from sympy import Symbol, Q
>>> from sympy.assumptions.refine import refine_Pow
>>> from sympy.abc import x,y,z
>>> refine_Pow((-1)**x, Q.real(x))
>>> refine_Pow((-1)**x, Q.even(x))
1
>>> refine_Pow((-1)**x, Q.odd(x))
-1
For powers of -1, even parts of the exponent can be simplified:
>>> refine_Pow((-1)**(x+y), Q.even(x))
(-1)**y
>>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
(-1)**y
>>> refine_Pow((-1)**(x+y+2), Q.odd(x))
(-1)**(y + 1)
>>> refine_Pow((-1)**(x+3), True)
(-1)**(x + 1)
"""
from sympy.core import Pow, Rational
from sympy.functions.elementary.complexes import Abs
from sympy.functions import sign
if isinstance(expr.base, Abs):
if ask(Q.real(expr.base.args[0]), assumptions) and \
ask(Q.even(expr.exp), assumptions):
return expr.base.args[0] ** expr.exp
if ask(Q.real(expr.base), assumptions):
if expr.base.is_number:
if ask(Q.even(expr.exp), assumptions):
return abs(expr.base) ** expr.exp
if ask(Q.odd(expr.exp), assumptions):
return sign(expr.base) * abs(expr.base) ** expr.exp
if isinstance(expr.exp, Rational):
if type(expr.base) is Pow:
return abs(expr.base.base) ** (expr.base.exp * expr.exp)
if expr.base is S.NegativeOne:
if expr.exp.is_Add:
old = expr
# For powers of (-1) we can remove
# - even terms
# - pairs of odd terms
# - a single odd term + 1
# - A numerical constant N can be replaced with mod(N,2)
coeff, terms = expr.exp.as_coeff_add()
terms = set(terms)
even_terms = set([])
odd_terms = set([])
initial_number_of_terms = len(terms)
for t in terms:
if ask(Q.even(t), assumptions):
even_terms.add(t)
elif ask(Q.odd(t), assumptions):
odd_terms.add(t)
terms -= even_terms
if len(odd_terms) % 2:
terms -= odd_terms
new_coeff = (coeff + S.One) % 2
else:
terms -= odd_terms
new_coeff = coeff % 2
if new_coeff != coeff or len(terms) < initial_number_of_terms:
terms.add(new_coeff)
expr = expr.base**(Add(*terms))
# Handle (-1)**((-1)**n/2 + m/2)
e2 = 2*expr.exp
if ask(Q.even(e2), assumptions):
if e2.could_extract_minus_sign():
e2 *= expr.base
if e2.is_Add:
i, p = e2.as_two_terms()
if p.is_Pow and p.base is S.NegativeOne:
if ask(Q.integer(p.exp), assumptions):
i = (i + 1)/2
if ask(Q.even(i), assumptions):
return expr.base**p.exp
elif ask(Q.odd(i), assumptions):
return expr.base**(p.exp + 1)
else:
return expr.base**(p.exp + i)
if old != expr:
return expr
def refine_atan2(expr, assumptions):
"""
Handler for the atan2 function
Examples
========
>>> from sympy import Symbol, Q, refine, atan2
>>> from sympy.assumptions.refine import refine_atan2
>>> from sympy.abc import x, y
>>> refine_atan2(atan2(y,x), Q.real(y) & Q.positive(x))
atan(y/x)
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.negative(x))
atan(y/x) - pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.negative(x))
atan(y/x) + pi
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.negative(x))
pi
>>> refine_atan2(atan2(y,x), Q.positive(y) & Q.zero(x))
pi/2
>>> refine_atan2(atan2(y,x), Q.negative(y) & Q.zero(x))
-pi/2
>>> refine_atan2(atan2(y,x), Q.zero(y) & Q.zero(x))
nan
"""
from sympy.functions.elementary.trigonometric import atan
from sympy.core import S
y, x = expr.args
if ask(Q.real(y) & Q.positive(x), assumptions):
return atan(y / x)
elif ask(Q.negative(y) & Q.negative(x), assumptions):
return atan(y / x) - S.Pi
elif ask(Q.positive(y) & Q.negative(x), assumptions):
return atan(y / x) + S.Pi
elif ask(Q.zero(y) & Q.negative(x), assumptions):
return S.Pi
elif ask(Q.positive(y) & Q.zero(x), assumptions):
return S.Pi/2
elif ask(Q.negative(y) & Q.zero(x), assumptions):
return -S.Pi/2
elif ask(Q.zero(y) & Q.zero(x), assumptions):
return S.NaN
else:
return expr
def refine_Relational(expr, assumptions):
"""
Handler for Relational
>>> from sympy.assumptions.refine import refine_Relational
>>> from sympy.assumptions.ask import Q
>>> from sympy.abc import x
>>> refine_Relational(x<0, ~Q.is_true(x<0))
False
"""
return ask(Q.is_true(expr), assumptions)
def refine_re(expr, assumptions):
"""
Handler for real part.
>>> from sympy.assumptions.refine import refine_re
>>> from sympy import Q, re
>>> from sympy.abc import x
>>> refine_re(re(x), Q.real(x))
x
>>> refine_re(re(x), Q.imaginary(x))
0
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions):
return arg
if ask(Q.imaginary(arg), assumptions):
return S.Zero
return _refine_reim(expr, assumptions)
def refine_im(expr, assumptions):
"""
Handler for imaginary part.
>>> from sympy.assumptions.refine import refine_im
>>> from sympy import Q, im
>>> from sympy.abc import x
>>> refine_im(im(x), Q.real(x))
0
>>> refine_im(im(x), Q.imaginary(x))
-I*x
"""
arg = expr.args[0]
if ask(Q.real(arg), assumptions):
return S.Zero
if ask(Q.imaginary(arg), assumptions):
return - S.ImaginaryUnit * arg
return _refine_reim(expr, assumptions)
def _refine_reim(expr, assumptions):
# Helper function for refine_re & refine_im
expanded = expr.expand(complex = True)
if expanded != expr:
refined = refine(expanded, assumptions)
if refined != expanded:
return refined
# Best to leave the expression as is
return None
def refine_sign(expr, assumptions):
"""
Handler for sign
Examples
========
>>> from sympy.assumptions.refine import refine_sign
>>> from sympy import Symbol, Q, sign, im
>>> x = Symbol('x', real = True)
>>> expr = sign(x)
>>> refine_sign(expr, Q.positive(x) & Q.nonzero(x))
1
>>> refine_sign(expr, Q.negative(x) & Q.nonzero(x))
-1
>>> refine_sign(expr, Q.zero(x))
0
>>> y = Symbol('y', imaginary = True)
>>> expr = sign(y)
>>> refine_sign(expr, Q.positive(im(y)))
I
>>> refine_sign(expr, Q.negative(im(y)))
-I
"""
arg = expr.args[0]
if ask(Q.zero(arg), assumptions):
return S.Zero
if ask(Q.real(arg)):
if ask(Q.positive(arg), assumptions):
return S.One
if ask(Q.negative(arg), assumptions):
return S.NegativeOne
if ask(Q.imaginary(arg)):
arg_re, arg_im = arg.as_real_imag()
if ask(Q.positive(arg_im), assumptions):
return S.ImaginaryUnit
if ask(Q.negative(arg_im), assumptions):
return -S.ImaginaryUnit
return expr
handlers_dict = {
'Abs': refine_abs,
'Pow': refine_Pow,
'atan2': refine_atan2,
'Equality': refine_Relational,
'Unequality': refine_Relational,
'GreaterThan': refine_Relational,
'LessThan': refine_Relational,
'StrictGreaterThan': refine_Relational,
'StrictLessThan': refine_Relational,
're': refine_re,
'im': refine_im,
'sign': refine_sign
}
| {
"content_hash": "d810c971a8813aa271be402319e36e27",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 78,
"avg_line_length": 30.235632183908045,
"alnum_prop": 0.5379205474244441,
"repo_name": "kaushik94/sympy",
"id": "efc753ee4d240565254858bf04b5db35f64c9fec",
"size": "10522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/assumptions/refine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5094"
},
{
"name": "Python",
"bytes": "13553568"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "TeX",
"bytes": "32356"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
class BuildableReference(object):
def __init__(self, entry_item):
        self.contents = entry_item
        self.BuildableIdentifier = self.contents.get('BuildableIdentifier', None)
        self.BlueprintIdentifier = self.contents.get('BlueprintIdentifier', None)
        self.BuildableName = self.contents.get('BuildableName', None)
        self.BlueprintName = self.contents.get('BlueprintName', None)
        self.ReferencedContainer = self.contents.get('ReferencedContainer', None) | {
"content_hash": "196a135deeed646894a51c8e4f2716f5",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 82,
"avg_line_length": 55.44444444444444,
"alnum_prop": 0.7034068136272545,
"repo_name": "samdmarshall/pyxcscheme",
"id": "41eee857f75abe6de742d3a0f8237472c76fe6ad",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyxcscheme/BuildableReference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9926"
}
],
"symlink_target": ""
} |
"""Example usage of terminaltables using colorclass.
Just prints sample text and exits.
"""
from __future__ import print_function
from colorclass import Color, Windows
from terminaltables import SingleTable
Windows.enable(auto_colors=True, reset_atexit=True) # Does nothing if not on Windows.
table_data = [
[Color('{autogreen}<10ms{/autogreen}'), '192.168.0.100, 192.168.0.101'],
[Color('{autoyellow}10ms <= 100ms{/autoyellow}'), '192.168.0.102, 192.168.0.103'],
[Color('{autored}>100ms{/autored}'), '192.168.0.105'],
]
table = SingleTable(table_data)
table.inner_heading_row_border = False
print()
print(table.table)
table.title = '192.168.0.105'
table.justify_columns = {0: 'center', 1: 'center', 2: 'center'}
table.inner_row_border = True
table.table_data = [
[Color('Low Space'), Color('{autocyan}Nominal Space{/autocyan}'), Color('Excessive Space')],
[Color('Low Load'), Color('Nominal Load'), Color('{autored}High Load{/autored}')],
[Color('{autocyan}Low Free RAM{/autocyan}'), Color('Nominal Free RAM'), Color('High Free RAM')],
]
print()
print(table.table)
table.title = None
table.outer_border = False
table.table_data = [['A', 'B'], ['C', 'D']]
print()
print(table.table)
table.outer_border = True
table.inner_row_border = False
table.inner_column_border = False
print()
print(table.table)
table = SingleTable([['Obey Obey Obey Obey']], 'Instructions')
print()
print(table.table)
print()
| {
"content_hash": "6a1d19e655efe8cf07b33f81cdc757eb",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 100,
"avg_line_length": 27.615384615384617,
"alnum_prop": 0.6894150417827298,
"repo_name": "rtulke/terminaltables",
"id": "9a82d20be5a6e11b217b126e5cffb86ed90bf009",
"size": "1458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73168"
}
],
"symlink_target": ""
} |
import sys
import readline
# Our buggy program
def remove_html_markup(s):
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
# main program that runs the buggy program
def main():
print remove_html_markup('xyz')
print remove_html_markup('"<b>foo</b>"')
print remove_html_markup("'<b>foo</b>'")
# globals
breakpoints = {9: True}
stepping = False
"""
Our debug function
Improve and expand this function to accept
a breakpoint command 'b <line>'.
Add the line number to the breakpoints dictionary
or print 'You must supply a line number'
if 'b' is not followed by a line number.
"""
def debug(command, my_locals):
global stepping
global breakpoints
if command.find(' ') > 0:
arg = command.split(' ')[1]
else:
arg = None
if command.startswith('s'): # step
stepping = True
return True
elif command.startswith('c'): # continue
stepping = False
return True
elif command.startswith('p'): # print
if arg == None:
print my_locals
elif arg in my_locals:
print arg, " = ", repr(my_locals[arg])
else:
print "No such variable:", arg
elif command.startswith('b'): # breakpoint
if arg != None:
breakpoints[int(arg)] = True
else:
print 'You must supply a line number'
elif command.startswith('q'): # quit
sys.exit(0)
else:
print "No such command", repr(command)
return False
commands = ["b 5", "c", "c", "q"]
def input_command():
#command = raw_input("(my-spyder) ")
global commands
command = commands.pop(0)
return command
def traceit(frame, event, trace_arg):
global stepping
if event == 'line':
if stepping or breakpoints.has_key(frame.f_lineno):
resume = False
while not resume:
print event, frame.f_lineno, frame.f_code.co_name, frame.f_locals
command = input_command()
resume = debug(command, frame.f_locals)
return traceit
# Using the tracer
#sys.settrace(traceit)
#main()
#sys.settrace(None)
#Simple test
print breakpoints
debug("b 5", {'quote': False, 's': 'xyz', 'tag': False, 'c': 'b', 'out': ''})
print breakpoints == {9: True, 5: True}
#>>> True
| {
"content_hash": "7617b8d83e79a9bd911d30bb52db173a",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 81,
"avg_line_length": 25.135922330097088,
"alnum_prop": 0.5658555426805717,
"repo_name": "Quexint/Assignment-Driven-Learning",
"id": "6ed8d5e56c121fec5081d320a05a59182887fcee",
"size": "2664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OCW/[Udacity]Software_Debugging/src/1.AddBreakpointCommand.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59411"
},
{
"name": "C++",
"bytes": "1201623"
},
{
"name": "Cool",
"bytes": "77973"
},
{
"name": "HTML",
"bytes": "4530"
},
{
"name": "Java",
"bytes": "1226154"
},
{
"name": "Jupyter Notebook",
"bytes": "90842"
},
{
"name": "Lex",
"bytes": "6552"
},
{
"name": "Makefile",
"bytes": "33883"
},
{
"name": "Perl",
"bytes": "380153"
},
{
"name": "Python",
"bytes": "101809"
},
{
"name": "Ruby",
"bytes": "3013"
},
{
"name": "Shell",
"bytes": "9885"
},
{
"name": "Swift",
"bytes": "12873"
},
{
"name": "Yacc",
"bytes": "10922"
}
],
"symlink_target": ""
} |
import pytest
from countries_plus.models import Country
from countries_plus.utils import GeonamesParseError, parse_geonames_data, update_geonames_data
@pytest.mark.django_db
class TestParseGeonamesData:
@pytest.fixture
def valid_data(self):
return [
r"#ISO ISO3 ISO-Numeric fips Country Capital Area(in sq km) Population Continent tld CurrencyCode CurrencyName Phone Postal Code Format Postal Code Regex Languages geonameid neighbours EquivalentFipsCode".encode(),
r"AD AND 020 AN Andorra Andorra la Vella 468 84000 EU .ad EUR Euro 376 AD### ^(?:AD)*(\d{3})$ ca 3041565 ES,FR ".encode()
]
@pytest.fixture
def invalid_data_no_header(self):
return [
r"AD AND 020 AN Andorra Andorra la Vella 468 84000 EU .ad EUR Euro 376 AD### ^(?:AD)*(\d{3})$ ca 3041565 ES,FR ".encode()
]
@pytest.fixture
def invalid_data_bad_header(self):
return [
r"#ISO Country Capital Area(in sq km) Population Continent tld CurrencyCode CurrencyName Phone Postal Code Format Postal Code Regex Languages geonameid neighbours EquivalentFipsCode".encode(),
r"AD AND 020 AN Andorra Andorra la Vella 468 84000 EU .ad EUR Euro 376 AD### ^(?:AD)*(\d{3})$ ca 3041565 ES,FR ".encode()
]
@pytest.fixture
def invalid_data_invalid_field_length(self):
return [
r"#ISO ISO3 ISO-Numeric fips Country Capital Area(in sq km) Population Continent tld CurrencyCode CurrencyName Phone Postal Code Format Postal Code Regex Languages geonameid neighbours EquivalentFipsCode".encode(),
r"AD INVALID_ISO3 020 AN Andorra Andorra la Vella 468 84000 EU .ad EUR Euro 376 AD### ^(?:AD)*(\d{3})$ ca 3041565 ES,FR ".encode()
]
def test_valid_data(self, valid_data):
parse_geonames_data(iter(valid_data))
assert Country.objects.count() == 1
def test_invalid_data_no_header(self, invalid_data_no_header):
with pytest.raises(GeonamesParseError):
parse_geonames_data(iter(invalid_data_no_header))
def test_invalid_data_bad_header(self, invalid_data_bad_header):
with pytest.raises(GeonamesParseError):
parse_geonames_data(iter(invalid_data_bad_header))
def test_invalid_data_field_length(self, invalid_data_invalid_field_length):
with pytest.raises(GeonamesParseError):
parse_geonames_data(iter(invalid_data_invalid_field_length))
def test_invalid_data_field_length_update(self, valid_data, invalid_data_invalid_field_length):
parse_geonames_data(iter(valid_data))
with pytest.raises(GeonamesParseError):
parse_geonames_data(iter(invalid_data_invalid_field_length))
@pytest.mark.django_db
class TestUpdateGeonamesData:
def test_update_geonames_data(self):
# If the geonames.org dataset adds/removes a country or changes its format
# this test will fail, which is intended.
num_updated, num_created = update_geonames_data()
assert num_updated == 0
assert num_created == 252
num_updated, num_created = update_geonames_data()
assert num_updated == 252
assert num_created == 0
assert Country.objects.count() == 252
| {
"content_hash": "c35e8ed0c35853479a3c6e97b0eb0936",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 226,
"avg_line_length": 44.45205479452055,
"alnum_prop": 0.6816640986132512,
"repo_name": "cordery/django-countries-plus",
"id": "8c718b8335055313c5662933542fe78743680996",
"size": "3270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "34"
},
{
"name": "Python",
"bytes": "43480"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class UpdateMetadataDTO(Model):
"""PATCH Body schema to represent list of Metadata to be updated.
:param delete: List of Metadata associated with answer to be deleted
:type delete:
list[~azure.cognitiveservices.knowledge.qnamaker.models.MetadataDTO]
:param add: List of metadata associated with answer to be added
:type add:
list[~azure.cognitiveservices.knowledge.qnamaker.models.MetadataDTO]
"""
_attribute_map = {
'delete': {'key': 'delete', 'type': '[MetadataDTO]'},
'add': {'key': 'add', 'type': '[MetadataDTO]'},
}
def __init__(self, **kwargs):
super(UpdateMetadataDTO, self).__init__(**kwargs)
self.delete = kwargs.get('delete', None)
self.add = kwargs.get('add', None)
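# Hedged usage sketch (added for illustration; the metadata names and values
# below are hypothetical):
#
#     from azure.cognitiveservices.knowledge.qnamaker.models import MetadataDTO
#     body = UpdateMetadataDTO(
#         add=[MetadataDTO(name='category', value='api')],
#         delete=[MetadataDTO(name='obsolete', value='true')])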
| {
"content_hash": "dbc7d5e4ae3ed4af31d3f12400d0583a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 35.08695652173913,
"alnum_prop": 0.6530359355638166,
"repo_name": "Azure/azure-sdk-for-python",
"id": "de526f59d55f9849670e1c517f52744612fab20a",
"size": "1281",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/cognitiveservices/azure-cognitiveservices-knowledge-qnamaker/azure/cognitiveservices/knowledge/qnamaker/models/update_metadata_dto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
if sys.version_info < (3,):
try:
from builtins import (bytes, dict, int, list, object, range, str,
ascii, chr, hex, input, next, oct, open,
pow, round, super, filter, map, zip)
from future.builtins.disabled import (apply, cmp, coerce, execfile,
file, long, raw_input,
reduce, reload,
unicode, xrange, StandardError)
except:
print("need future module (try 'pip install future')")
#
from math import *
#
import time
import profile
#
import numpy as np
import scipy
#
import matplotlib as mpl
import pylab as plt
#
import matplotlib.backends
from matplotlib.backends.backend_pgf import FigureCanvasPgf
#
mpl.rcParams['xtick.labelsize']=8
mpl.rcParams['ytick.labelsize']=8
mpl.rcParams['xtick.major.pad']=5 # xticks too close to border!
#
mpl.backend_bases.register_backend('pdf', FigureCanvasPgf)
#
pgf_with_custom_preamble = {
"font.family": "serif", # use serif/main font for text elements
# "text.usetex": True, # use inline math for ticks
# "pgf.rcfonts": False, # don't setup fonts from rc parameters
"pgf.preamble": ["\\PassOptionsToPackage{cmyk}{xcolor}"]
}
#
mpl.rcParams.update(pgf_with_custom_preamble)
#
import imp
| {
"content_hash": "364816d6d629dade639f2bf73b9ea91e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 33.16279069767442,
"alnum_prop": 0.5904628330995793,
"repo_name": "tonyshardlow/reg_sde",
"id": "8c16192454be5c461d48f330ed030037565dbe3f",
"size": "1426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ground.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149184"
}
],
"symlink_target": ""
} |
from django.db import models
from members.models import Person, Payment
# Go Cardless model
class Mandate(models.Model):
mandate_id = models.CharField(max_length=50)
customer_id = models.CharField(max_length=50)
event_id = models.CharField(max_length=50)
active = models.BooleanField(default=False)
person = models.ForeignKey(
Person, on_delete=models.CASCADE, related_name="mandates"
)
class Payment_Event(models.Model):
created_at = models.CharField(max_length=30)
event_id = models.CharField(max_length=50)
action = models.CharField(max_length=30)
payment = models.ForeignKey(
Payment, on_delete=models.CASCADE, related_name="events", null=True
)
class Session(models.Model):
flow_id = models.CharField(max_length=80)
token = models.CharField(max_length=80)
person_id = models.IntegerField()
invoice_token = models.CharField(max_length=80, null=True)
| {
"content_hash": "cd573da6892ca05e5d62bc2cfb6faba0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 75,
"avg_line_length": 31.233333333333334,
"alnum_prop": 0.7129135538954109,
"repo_name": "ianastewart/cwltc-admin",
"id": "85695bde678ff78836bab341a87517521a21d6be",
"size": "937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cardless/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "945975"
},
{
"name": "Dockerfile",
"bytes": "882"
},
{
"name": "HTML",
"bytes": "526368"
},
{
"name": "JavaScript",
"bytes": "843481"
},
{
"name": "Python",
"bytes": "8389886"
},
{
"name": "Shell",
"bytes": "1023"
}
],
"symlink_target": ""
} |
from shadow4.syned.shape import Conic
from shadow4.beamline.optical_elements.mirrors.s4_mirror import S4MirrorElement, S4Mirror, ElementCoordinates
from shadow4.optical_surfaces.s4_conic import S4Conic
from shadow4.beamline.s4_optical_element import S4ConicOpticalElement
class S4ConicMirror(S4Mirror, S4ConicOpticalElement):
def __init__(self,
name="Conic Mirror",
boundary_shape=None,
conic_coefficients=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
# inputs related to mirror reflectivity
f_reflec=0, # reflectivity of surface: 0=no reflectivity, 1=full polarization
f_refl=0, # 0=prerefl file
# 1=electric susceptibility
# 2=user defined file (1D reflectivity vs angle)
# 3=user defined file (1D reflectivity vs energy)
# 4=user defined file (2D reflectivity vs energy and angle)
file_refl="", # preprocessor file fir f_refl=0,2,3,4
refraction_index=1.0 # refraction index (complex) for f_refl=1
):
S4ConicOpticalElement.__init__(self, conic_coefficients)
S4Mirror.__init__(self, name, boundary_shape, self._conic_surface_shape,
f_reflec, f_refl, file_refl, refraction_index)
class S4ConicMirrorElement(S4MirrorElement):
def __init__(self, optical_element=None, coordinates=None):
super().__init__(optical_element if optical_element is not None else S4ConicMirror(),
coordinates if coordinates is not None else ElementCoordinates())
if not isinstance(self.get_optical_element().get_surface_shape(), Conic):
raise ValueError("Wrong Optical Element: only Conic shape is accepted")
def apply_local_reflection(self, beam):
surface_shape = self.get_optical_element().get_surface_shape()
print(">>>>> Conic mirror")
ccc = S4Conic.initialize_from_coefficients(surface_shape.get_conic_coefficients())
mirr, normal = ccc.apply_specular_reflection_on_beam(beam)
return mirr, normal
| {
"content_hash": "7d559bce748c6b1aef8aa69609c2c23b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 109,
"avg_line_length": 51.476190476190474,
"alnum_prop": 0.6387604070305273,
"repo_name": "srio/minishadow",
"id": "e1310d4a758efacd33800d2af0869d4055dca075",
"size": "2162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shadow4/beamline/optical_elements/mirrors/s4_conic_mirror.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "575753"
}
],
"symlink_target": ""
} |
"""PowerTransform bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"PowerTransform",
]
class PowerTransform(bijector.Bijector):
"""Compute `Y = g(X) = (1 + X * c)**(1 / c), X >= -1 / c`.
The [power transform](https://en.wikipedia.org/wiki/Power_transform) maps
inputs from `[0, inf]` to `[-1/c, inf]`; this is equivalent to the `inverse`
of this bijector.
This bijector is equivalent to the `Exp` bijector when `c=0`.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tfp.distributions`.",
warn_once=True)
def __init__(self,
power=0.,
validate_args=False,
name="power_transform"):
"""Instantiates the `PowerTransform` bijector.
Args:
power: Python `float` scalar indicating the transform power, i.e.,
`Y = g(X) = (1 + X * c)**(1 / c)` where `c` is the `power`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if `power < 0` or is not known statically.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
with self._name_scope("init", values=[power]):
power = tensor_util.constant_value(
ops.convert_to_tensor(power, name="power"))
if power is None or power < 0:
raise ValueError("`power` must be a non-negative TF constant.")
self._power = power
super(PowerTransform, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
@property
def power(self):
"""The `c` in: `Y = g(X) = (1 + X * c)**(1 / c)`."""
return self._power
def _forward(self, x):
x = self._maybe_assert_valid_x(x)
if self.power == 0.:
return math_ops.exp(x)
# If large x accuracy is an issue, consider using:
# (1. + x * self.power)**(1. / self.power) when x >> 1.
return math_ops.exp(math_ops.log1p(x * self.power) / self.power)
def _inverse(self, y):
y = self._maybe_assert_valid_y(y)
if self.power == 0.:
return math_ops.log(y)
# If large y accuracy is an issue, consider using:
# (y**self.power - 1.) / self.power when y >> 1.
return math_ops.expm1(math_ops.log(y) * self.power) / self.power
def _inverse_log_det_jacobian(self, y):
y = self._maybe_assert_valid_y(y)
return (self.power - 1.) * math_ops.log(y)
def _forward_log_det_jacobian(self, x):
x = self._maybe_assert_valid_x(x)
if self.power == 0.:
return x
return (1. / self.power - 1.) * math_ops.log1p(x * self.power)
def _maybe_assert_valid_x(self, x):
if not self.validate_args or self.power == 0.:
return x
is_valid = check_ops.assert_non_negative(
1. + self.power * x,
message="Forward transformation input must be at least {}.".format(
-1. / self.power))
return control_flow_ops.with_dependencies([is_valid], x)
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return y
is_valid = check_ops.assert_positive(
y, message="Inverse transformation input must be greater than 0.")
return control_flow_ops.with_dependencies([is_valid], y)
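# --- Hedged usage sketch (added for illustration; assumes a TF 1.x session or
# eager environment and is not part of the original module) ---
#
#     pt = PowerTransform(power=0.5)
#     y = pt.forward([0., 1., 3.])   # (1 + x * 0.5)**(1 / 0.5) -> [1., 2.25, 6.25]
#     x = pt.inverse(y)              # recovers [0., 1., 3.]
#     # With power=0. the bijector reduces to Exp: forward(x) == exp(x).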
| {
"content_hash": "81c692bc9392731e9708ab096df5ffb7",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 34.30701754385965,
"alnum_prop": 0.6343646126310406,
"repo_name": "hfp/tensorflow-xsmm",
"id": "81b0792525f5f6e2335a7e0b7c0e3b8129224811",
"size": "4600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/bijectors/power_transform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "523814"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "53558932"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1303624"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "896901"
},
{
"name": "Jupyter Notebook",
"bytes": "2618412"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75333"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "12166"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43811576"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "502374"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from PyQt5.QtCore import pyqtSignal, Qt, QTimer
from PyQt5.QtWidgets import QProgressDialog
from . import performer
class Progress(QProgressDialog, performer.ThreadedJobPerformer):
finished = pyqtSignal(['QString'])
def __init__(self, parent):
flags = Qt.CustomizeWindowHint | Qt.WindowTitleHint | Qt.WindowSystemMenuHint
QProgressDialog.__init__(self, '', "Cancel", 0, 100, parent, flags)
self.setModal(True)
self.setAutoReset(False)
self.setAutoClose(False)
self._timer = QTimer()
self._jobid = ''
self._timer.timeout.connect(self.updateProgress)
def updateProgress(self):
# the values might change before setValue happens
last_progress = self.last_progress
last_desc = self.last_desc
if not self._job_running or last_progress is None:
self._timer.stop()
self.close()
if not self.job_cancelled:
self.finished.emit(self._jobid)
return
if self.wasCanceled():
self.job_cancelled = True
return
if last_desc:
self.setLabelText(last_desc)
self.setValue(last_progress)
def run(self, jobid, title, target, args=()):
self._jobid = jobid
self.reset()
self.setLabelText('')
self.run_threaded(target, args)
self.setWindowTitle(title)
self.show()
self._timer.start(500)
| {
"content_hash": "0c3e93150a6a9f04bd857979efe2d0c5",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 85,
"avg_line_length": 33.11363636363637,
"alnum_prop": 0.6135895676046671,
"repo_name": "hsoft/hscommon",
"id": "70901385070157a6a9fee5e277e2ff2e92aca774",
"size": "1787",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "jobprogress/qt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "219358"
}
],
"symlink_target": ""
} |
"""package github branch
Revision ID: 025e06b85efc
Revises: f0b00081fda9
Create Date: 2017-12-01 20:37:28.967575
"""
from alembic import op
import sqlalchemy as sa
from docassemble.webapp.database import dbtableprefix
# revision identifiers, used by Alembic.
revision = '025e06b85efc'
down_revision = 'f0b00081fda9'
branch_labels = None
depends_on = None
def upgrade():
op.add_column(dbtableprefix + 'package', sa.Column('gitbranch', sa.String(255)))
def downgrade():
op.drop_column(dbtableprefix + 'package', 'gitbranch')
| {
"content_hash": "288ee5f6876000ff0997d6b0cd47d383",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 84,
"avg_line_length": 22.416666666666668,
"alnum_prop": 0.7509293680297398,
"repo_name": "jhpyle/docassemble",
"id": "58c18a82cf70f05a6a7c6452b07b1ba218297259",
"size": "538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docassemble_webapp/docassemble/webapp/alembic/versions/025e06b85efc_package_github_branch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "156"
},
{
"name": "CSS",
"bytes": "87682"
},
{
"name": "Dockerfile",
"bytes": "6731"
},
{
"name": "Emacs Lisp",
"bytes": "3393"
},
{
"name": "Gherkin",
"bytes": "518252"
},
{
"name": "HTML",
"bytes": "176555"
},
{
"name": "JavaScript",
"bytes": "1903341"
},
{
"name": "Mako",
"bytes": "1042"
},
{
"name": "Python",
"bytes": "4402435"
},
{
"name": "Rich Text Format",
"bytes": "120028"
},
{
"name": "Shell",
"bytes": "146777"
},
{
"name": "TeX",
"bytes": "15582"
}
],
"symlink_target": ""
} |
import sys
from cx_Freeze import setup, Executable
build_exe_options = {"packages": ["numbers", "OpenGL"], "excludes": ["sfml"], "include_msvcr": "true"}
base = None
if sys.platform == "win32":
base = "Win32GUI"
setup(name="pymazing",
version="0.1.0",
options = {"build_exe": build_exe_options},
executables = [Executable("pymazing.py", base=base)])
# after build, copy the data directory and sfml (site-packages) directory to the build directory
| {
"content_hash": "53af00d38e014d0edff5f3bec1fb10c2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 102,
"avg_line_length": 30.625,
"alnum_prop": 0.6469387755102041,
"repo_name": "mikoro/pymazing",
"id": "aab806d6a7f713cdce4e3099a347d91b756762f1",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84215"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
} |
import collections
import itertools
import operator
from six import iteritems
from .common import *
from .datastructures import OrderedDict, Context
from .exceptions import *
from .types.compound import ModelType
from .undefined import Undefined
from .util import listify
try:
basestring #PY2
except NameError:
basestring = str #PY3
try:
unicode #PY2
except:
import codecs
unicode = str #PY3
###
# Transform loops
###
def import_loop(cls, instance_or_dict, field_converter=None, trusted_data=None,
mapping=None, partial=False, strict=False, init_values=False,
apply_defaults=False, convert=True, validate=False, new=False,
app_data=None, context=None):
"""
The import loop is designed to take untrusted data and convert it into the
native types, as described in ``cls``. It does this by calling
``field_converter`` on every field.
Errors are aggregated and returned by throwing a ``ModelConversionError``.
:param cls:
The class for the model.
:param instance_or_dict:
A dict of data to be converted into types according to ``cls``.
:param field_converter:
This function is applied to every field found in ``instance_or_dict``.
:param trusted_data:
A ``dict``-like structure that may contain already validated data.
:param partial:
Allow partial data to validate; useful for PATCH requests.
Essentially drops the ``required=True`` arguments from field
definitions. Default: False
:param strict:
Complain about unrecognized keys. Default: False
:param apply_defaults:
Whether to set fields to their default values when not present in input data.
:param app_data:
An arbitrary container for application-specific data that needs to
be available during the conversion.
:param context:
A ``Context`` object that encapsulates configuration options and ``app_data``.
The context object is created upon the initial invocation of ``import_loop``
and is then propagated through the entire process.
"""
if instance_or_dict is None:
got_data = False
else:
got_data = True
if got_data and not isinstance(instance_or_dict, (cls, dict)):
raise ConversionError('Model conversion requires a model or dict')
context = Context._make(context)
try:
context.initialized
except:
context._setdefaults({
'initialized': True,
'field_converter': field_converter,
'mapping': mapping or {},
'partial': partial,
'strict': strict,
'init_values': init_values,
'apply_defaults': apply_defaults,
'convert': convert,
'validate': validate,
'new': new,
'app_data': app_data if app_data is not None else {}
})
_model_mapping = context.mapping.get('model_mapping')
data = dict(trusted_data) if trusted_data else {}
errors = {}
# Determine all acceptable field input names
all_fields = set(cls._fields) ^ set(cls._serializables)
for field_name, field, in iteritems(cls._fields):
if field.serialized_name:
all_fields.add(field.serialized_name)
if field.deserialize_from:
all_fields.update(set(listify(field.deserialize_from)))
if field_name in context.mapping:
all_fields.update(set(listify(context.mapping[field_name])))
if got_data and context.strict:
# Check for rogues if strict is set
rogue_fields = set(instance_or_dict) - all_fields
if len(rogue_fields) > 0:
for field in rogue_fields:
errors[field] = 'Rogue field'
for field_name, field in iteritems(cls._fields):
value = Undefined
serialized_field_name = field_name
if got_data:
trial_keys = listify(field.deserialize_from)
trial_keys.extend(listify(context.mapping.get(field_name, [])))
if field.serialized_name:
serialized_field_name = field.serialized_name
trial_keys.append(field.serialized_name)
trial_keys.append(field_name)
for key in trial_keys:
if key and key in instance_or_dict:
value = instance_or_dict[key]
if value is Undefined:
if field_name in data:
continue
if context.apply_defaults:
value = field.default
if value is Undefined and context.init_values:
value = None
if got_data:
if field.is_compound:
if _model_mapping:
submap = _model_mapping.get(field_name)
else:
submap = {}
field_context = context._branch(mapping=submap)
else:
field_context = context
try:
value = context.field_converter(field, value, field_context)
except (FieldError, CompoundError) as exc:
errors[serialized_field_name] = exc
if isinstance(exc, DataError):
data[field_name] = exc.partial_data
continue
data[field_name] = value
if errors:
partial_data = dict(((key, value) for key, value in data.items() if value is not Undefined))
raise DataError(errors, partial_data)
return data
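# Hedged illustration (added; not part of upstream schematics): import_loop is
# normally driven indirectly through a Model subclass, roughly as sketched
# below. ``Person`` and its fields are hypothetical.
#
#     from schematics.models import Model
#     from schematics.types import StringType, IntType
#
#     class Person(Model):
#         name = StringType(required=True)
#         age = IntType(default=0)
#
#     person = Person({'name': 'Ada'})   # conversion runs through import_loop
#     person.validate()                  # validation reuses the same loop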
def export_loop(cls, instance_or_dict, field_converter=None, role=None, raise_error_on_role=True,
export_level=None, app_data=None, context=None):
"""
The export_loop function is intended to be a general loop definition that
can be used for any form of data shaping, such as application of roles or
how a field is transformed.
:param cls:
The model definition.
:param instance_or_dict:
The structure where fields from cls are mapped to values. The only
        expectation for this structure is that it implements a ``dict``
interface.
:param field_converter:
This function is applied to every field found in ``instance_or_dict``.
:param role:
The role used to determine if fields should be left out of the
transformation.
:param raise_error_on_role:
This parameter enforces strict behavior which requires substructures
to have the same role definition as their parent structures.
:param app_data:
An arbitrary container for application-specific data that needs to
be available during the conversion.
:param context:
A ``Context`` object that encapsulates configuration options and ``app_data``.
        The context object is created upon the initial invocation of ``export_loop``
and is then propagated through the entire process.
"""
context = Context._make(context)
try:
context.initialized
except:
context._setdefaults({
'initialized': True,
'field_converter': field_converter,
'role': role,
'raise_error_on_role': raise_error_on_role,
'export_level': export_level,
'app_data': app_data if app_data is not None else {}
})
data = {}
# Translate `role` into `gottago` function
gottago = wholelist()
if hasattr(cls, '_options') and context.role in cls._options.roles:
gottago = cls._options.roles[context.role]
elif context.role and context.raise_error_on_role:
error_msg = u'%s Model has no role "%s"'
raise ValueError(error_msg % (cls.__name__, context.role))
else:
gottago = cls._options.roles.get("default", gottago)
fields_order = (getattr(cls._options, 'fields_order', None)
if hasattr(cls, '_options') else None)
for field_name, field, value in atoms(cls, instance_or_dict):
serialized_name = field.serialized_name or field_name
# Skipping this field was requested
if gottago(field_name, value):
continue
_export_level = field.get_export_level(context)
if _export_level == DROP:
continue
elif value not in (None, Undefined):
value = context.field_converter(field, value, context)
if value is Undefined:
if _export_level <= DEFAULT:
continue
elif value is None:
if _export_level <= NOT_NONE:
continue
elif field.is_compound and len(value) == 0:
if _export_level <= NONEMPTY:
continue
if value is Undefined:
value = None
data[serialized_name] = value
if fields_order:
data = sort_dict(data, fields_order)
return data
def sort_dict(dct, based_on):
"""
Sorts provided dictionary based on order of keys provided in ``based_on``
list.
    Order is not guaranteed if ``dct`` has keys that are not present
    in ``based_on``.
:param dct:
Dictionary to be sorted.
:param based_on:
List of keys in order that resulting dictionary should have.
:return:
OrderedDict with keys in the same order as provided ``based_on``.
"""
return OrderedDict(
sorted(
dct.items(),
key=lambda el: based_on.index(el[0] if el[0] in based_on else -1))
)
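def _sort_dict_example():  # pragma: no cover
    # Hedged illustration (added; not part of upstream schematics): the keys of
    # the result follow the order given in ``based_on``.
    ordered = sort_dict({'b': 2, 'a': 1, 'c': 3}, ['c', 'a', 'b'])
    assert list(ordered) == ['c', 'a', 'b']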
def atoms(cls, instance_or_dict):
"""
Iterator for the atomic components of a model definition and relevant
data that creates a 3-tuple of the field's name, its type instance and
its value.
:param cls:
The model definition.
:param instance_or_dict:
The structure where fields from cls are mapped to values. The only
expectation for this structure is that it implements a ``Mapping``
interface.
"""
all_fields = itertools.chain(iteritems(cls._fields),
iteritems(cls._serializables))
return ((field_name, field, instance_or_dict.get(field_name, Undefined))
for field_name, field in all_fields)
###
# Field filtering
###
class Role(collections.Set):
"""
A ``Role`` object can be used to filter specific fields against a sequence.
The ``Role`` is two things: a set of names and a function. The function
    acts as a filter: it takes a field name and value as input and returns
    either ``True`` or ``False``, indicating that the field should or should
    not be skipped.
A ``Role`` can be operated on as a ``Set`` object representing the fields
    it has an opinion on. When Roles are combined with other roles, the
filtering behavior of the first role is used.
"""
def __init__(self, function, fields):
self.function = function
self.fields = set(fields)
def _from_iterable(self, iterable):
return Role(self.function, iterable)
def __contains__(self, value):
return value in self.fields
def __iter__(self):
return iter(self.fields)
def __len__(self):
return len(self.fields)
def __eq__(self, other):
return (self.function.__name__ == other.function.__name__ and
self.fields == other.fields)
def __str__(self):
return '%s(%s)' % (self.function.__name__,
', '.join("'%s'" % f for f in self.fields))
def __repr__(self):
return '<Role %s>' % str(self)
# edit role fields
def __add__(self, other):
fields = self.fields.union(other)
return self._from_iterable(fields)
def __sub__(self, other):
fields = self.fields.difference(other)
return self._from_iterable(fields)
# apply role to field
def __call__(self, name, value):
return self.function(name, value, self.fields)
# static filter functions
@staticmethod
def wholelist(name, value, seq):
"""
        Accepts a field name, value, and a field list. This function
        implements acceptance of all fields by never requesting that a field
        be skipped, thus returning False for all input.
:param name:
The field name to inspect.
:param value:
The field's value.
:param seq:
The list of fields associated with the ``Role``.
"""
return False
@staticmethod
def whitelist(name, value, seq):
"""
Implements the behavior of a whitelist by requesting a field be skipped
        whenever its name is not in the list of fields.
:param name:
The field name to inspect.
:param value:
The field's value.
:param seq:
The list of fields associated with the ``Role``.
"""
if seq is not None and len(seq) > 0:
return name not in seq
return True
@staticmethod
def blacklist(name, value, seq):
"""
Implements the behavior of a blacklist by requesting a field be skipped
        whenever its name is found in the list of fields.
        :param name:
            The field name to inspect.
        :param value:
The field's value.
:param seq:
The list of fields associated with the ``Role``.
"""
if seq is not None and len(seq) > 0:
return name in seq
return False
def wholelist(*field_list):
"""
Returns a function that evicts nothing. Exists mainly to be an explicit
    allowance of all fields instead of using an empty blacklist.
"""
return Role(Role.wholelist, field_list)
def whitelist(*field_list):
"""
Returns a function that operates as a whitelist for the provided list of
fields.
A whitelist is a list of fields explicitly named that are allowed.
"""
return Role(Role.whitelist, field_list)
def blacklist(*field_list):
"""
Returns a function that operates as a blacklist for the provided list of
fields.
A blacklist is a list of fields explicitly named that are not allowed.
"""
return Role(Role.blacklist, field_list)
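# Usage sketch (illustrative only, not part of the original module): a role
# returns True when a field should be skipped and False when it should be kept.
#
#     >>> keep_public = whitelist('name', 'email')
#     >>> keep_public('name', 'any value')       # listed field: keep
#     False
#     >>> keep_public('password', 'any value')   # unlisted field: skip
#     True
#     >>> sorted(keep_public + ['phone'])        # roles behave like sets
#     ['email', 'name', 'phone']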
###
# Field converter interface
###
class FieldConverter(object):
def __call__(self, field, value, context):
raise NotImplementedError
###
# Standard export converters
###
class ExportConverter(FieldConverter):
def __init__(self, format, exceptions=None):
self.primary = format
self.secondary = not format
self.exceptions = set(exceptions) if exceptions else None
def __call__(self, field, value, context):
format = self.primary
if self.exceptions:
if any((issubclass(field.typeclass, cls) for cls in self.exceptions)):
format = self.secondary
return field.export(value, format, context)
_to_native_converter = ExportConverter(NATIVE)
_to_dict_converter = ExportConverter(NATIVE, [ModelType])
_to_primitive_converter = ExportConverter(PRIMITIVE)
###
# Standard import converters
###
class ImportConverter(FieldConverter):
def __init__(self, action):
self.action = action
self.method = operator.attrgetter(self.action)
def __call__(self, field, value, context):
field.check_required(value, context)
if value in (None, Undefined):
return value
return self.method(field)(value, context)
import_converter = ImportConverter('convert')
validation_converter = ImportConverter('validate')
###
# Context stub factories
###
def get_import_context(**options):
import_options = {
'field_converter': import_converter,
'partial': False,
'strict': False,
'convert': True,
'validate': False
}
import_options.update(options)
return Context(**import_options)
###
# Import and export functions
###
def convert(cls, instance_or_dict, **kwargs):
return import_loop(cls, instance_or_dict, import_converter, **kwargs)
def to_native(cls, instance_or_dict, **kwargs):
return export_loop(cls, instance_or_dict, _to_native_converter, **kwargs)
def to_dict(cls, instance_or_dict, **kwargs):
return export_loop(cls, instance_or_dict, _to_dict_converter, **kwargs)
def to_primitive(cls, instance_or_dict, **kwargs):
return export_loop(cls, instance_or_dict, _to_primitive_converter, **kwargs)
EMPTY_LIST = "[]"
EMPTY_DICT = "{}"
def expand(data, expanded_data=None):
"""
    Expands a flattened structure into its corresponding layers. Essentially,
it is the counterpart to ``flatten_to_dict``.
:param data:
The data to expand.
:param expanded_data:
        Existing expanded data that this function uses for output.
"""
expanded_dict = {}
context = expanded_data or expanded_dict
for key, value in iteritems(data):
try:
key, remaining = key.split(".", 1)
except ValueError:
if value == EMPTY_DICT:
value = {}
if key in expanded_dict:
continue
elif value == EMPTY_LIST:
value = []
if key in expanded_dict:
continue
expanded_dict[key] = value
else:
current_context = context.setdefault(key, {})
if current_context == []:
current_context = context[key] = {}
current_context.update(expand({remaining: value}, current_context))
return expanded_dict
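# Usage sketch (illustrative only, not part of the original module): dotted
# keys are rebuilt into nested dictionaries.
#
#     >>> expand({'a.b': 1, 'a.c': 2, 'd': 3})
#     {'a': {'b': 1, 'c': 2}, 'd': 3}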
def flatten_to_dict(instance_or_dict, prefix=None, ignore_none=True):
"""
Flattens an iterable structure into a single layer dictionary.
For example:
{
's': 'jms was hrrr',
'l': ['jms was here', 'here', 'and here']
}
becomes
{
's': 'jms was hrrr',
u'l.1': 'here',
u'l.0': 'jms was here',
u'l.2': 'and here'
}
:param instance_or_dict:
The structure where fields from cls are mapped to values. The only
expectation for this structure is that it implements a ``Mapping``
interface.
:param ignore_none:
        When ``True``, fields whose value is ``None`` are dropped from the
        flattened output; when ``False`` they are included with a ``None`` value.
        Default: True
:param prefix:
This puts a prefix in front of the field names during flattening.
Default: None
"""
if isinstance(instance_or_dict, dict):
iterator = iteritems(instance_or_dict)
else:
iterator = enumerate(instance_or_dict)
flat_dict = {}
for key, value in iterator:
if prefix:
key = ".".join(map(unicode, (prefix, key)))
if value == []:
value = EMPTY_LIST
elif value == {}:
value = EMPTY_DICT
if isinstance(value, (dict, list)):
flat_dict.update(flatten_to_dict(value, prefix=key))
elif value is not None:
flat_dict[key] = value
elif not ignore_none:
flat_dict[key] = None
return flat_dict
def flatten(cls, instance_or_dict, role=None, raise_error_on_role=True,
ignore_none=True, prefix=None, app_data=None, context=None):
"""
Produces a flat dictionary representation of the model. Flat, in this
context, means there is only one level to the dictionary. Multiple layers
are represented by the structure of the key.
Example:
>>> class Foo(Model):
... s = StringType()
... l = ListType(StringType)
>>> f = Foo()
>>> f.s = 'string'
>>> f.l = ['jms', 'was here', 'and here']
>>> flatten(Foo, f)
    {'s': 'string', u'l.0': 'jms', u'l.1': 'was here', u'l.2': 'and here'}
:param cls:
The model definition.
:param instance_or_dict:
The structure where fields from cls are mapped to values. The only
expectation for this structure is that it implements a ``Mapping``
interface.
:param role:
The role used to determine if fields should be left out of the
transformation.
:param raise_error_on_role:
This parameter enforces strict behavior which requires substructures
to have the same role definition as their parent structures.
:param ignore_none:
        When ``True``, fields whose value is ``None`` are dropped from the
        flattened output; when ``False`` they are included with a ``None`` value.
        Default: True
:param prefix:
This puts a prefix in front of the field names during flattening.
Default: None
"""
data = to_primitive(cls, instance_or_dict, role=role, raise_error_on_role=raise_error_on_role,
export_level=DEFAULT, app_data=app_data, context=context)
flattened = flatten_to_dict(data, prefix=prefix, ignore_none=ignore_none)
return flattened
| {
"content_hash": "b25f3fd59f2d7339330cda5a34f7c9bf",
"timestamp": "",
"source": "github",
"line_count": 684,
"max_line_length": 100,
"avg_line_length": 30.637426900584796,
"alnum_prop": 0.6097060507730483,
"repo_name": "mlyundin/schematics",
"id": "9840c31ce253239e5a5557706aacbe4eb80c5586",
"size": "20981",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "schematics/transforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "259703"
}
],
"symlink_target": ""
} |
"""Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import os
import re
import sys
import zlib
PY3 = sys.version_info >= (3,)
if PY3:
xrange = range
# inspect.getargspec() raises DeprecationWarnings in Python 3.5.
# The two functions have compatible interfaces for the parts we need.
if PY3:
from inspect import getfullargspec as getargspec
else:
from inspect import getargspec
# Aliases for types that are spelled differently in different Python
# versions. bytes_type is deprecated and no longer used in Tornado
# itself but is left in case anyone outside Tornado is using it.
bytes_type = bytes
if PY3:
unicode_type = str
basestring_type = str
else:
# The names unicode and basestring don't exist in py3 so silence flake8.
unicode_type = unicode # noqa
basestring_type = basestring # noqa
try:
import typing # noqa
from typing import cast
_ObjectDictBase = typing.Dict[str, typing.Any]
except ImportError:
_ObjectDictBase = dict
def cast(typ, x):
return x
else:
# More imports that are only needed in type comments.
import datetime # noqa
import types # noqa
from typing import Any, AnyStr, Union, Optional, Dict, Mapping # noqa
from typing import Tuple, Match, Callable # noqa
if PY3:
_BaseString = str
else:
_BaseString = Union[bytes, unicode_type]
class ObjectDict(_ObjectDictBase):
"""Makes a dictionary behave like an object, with attribute-style access.
"""
def __getattr__(self, name):
# type: (str) -> Any
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
# type: (str, Any) -> None
self[name] = value
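# Usage sketch (illustrative only, not part of the original module):
#
#     >>> conf = ObjectDict(port=8888)
#     >>> conf.port
#     8888
#     >>> conf.host = 'localhost'
#     >>> conf['host']
#     'localhost'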
class GzipDecompressor(object):
"""Streaming gzip decompressor.
The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
"""
def __init__(self):
# Magic parameter makes zlib module understand gzip header
# http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
# This works on cpython and pypy, but not jython.
self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def decompress(self, value, max_length=None):
# type: (bytes, Optional[int]) -> bytes
"""Decompress a chunk, returning newly-available data.
Some data may be buffered for later processing; `flush` must
be called when there is no more input data to ensure that
all data was processed.
If ``max_length`` is given, some input data may be left over
in ``unconsumed_tail``; you must retrieve this value and pass
it back to a future call to `decompress` if it is not empty.
"""
return self.decompressobj.decompress(value, max_length)
@property
def unconsumed_tail(self):
# type: () -> bytes
"""Returns the unconsumed portion left over
"""
return self.decompressobj.unconsumed_tail
def flush(self):
# type: () -> bytes
"""Return any remaining buffered data not yet returned by decompress.
Also checks for errors such as truncated input.
No other methods may be called on this object after `flush`.
"""
return self.decompressobj.flush()
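# Usage sketch (illustrative only, not part of the original module);
# ``gzipped_chunks`` is a hypothetical iterable of gzip-compressed bytes:
#
#     decompressor = GzipDecompressor()
#     plain = b''.join(decompressor.decompress(chunk) for chunk in gzipped_chunks)
#     plain += decompressor.flush()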
def import_object(name):
# type: (_BaseString) -> Any
"""Imports an object by name.
import_object('x') is equivalent to 'import x'.
import_object('x.y.z') is equivalent to 'from x.y import z'.
>>> import tornado.escape
>>> import_object('tornado.escape') is tornado.escape
True
>>> import_object('tornado.escape.utf8') is tornado.escape.utf8
True
>>> import_object('tornado') is tornado
True
>>> import_object('tornado.missing_module')
Traceback (most recent call last):
...
ImportError: No module named missing_module
"""
if not isinstance(name, str):
# on python 2 a byte string is required.
name = name.encode('utf-8')
if name.count('.') == 0:
return __import__(name, None, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
# Stubs to make mypy happy (and later for actual type-checking).
def raise_exc_info(exc_info):
# type: (Tuple[type, BaseException, types.TracebackType]) -> None
pass
def exec_in(code, glob, loc=None):
# type: (Any, Dict[str, Any], Optional[Mapping[str, Any]]) -> Any
if isinstance(code, basestring_type):
# exec(string) inherits the caller's future imports; compile
# the string first to prevent that.
code = compile(code, '<string>', 'exec', dont_inherit=True)
exec(code, glob, loc)
if PY3:
exec("""
def raise_exc_info(exc_info):
raise exc_info[1].with_traceback(exc_info[2])
""")
else:
exec("""
def raise_exc_info(exc_info):
raise exc_info[0], exc_info[1], exc_info[2]
""")
def errno_from_exception(e):
# type: (BaseException) -> Optional[int]
"""Provides the errno from an Exception object.
    There are cases where the errno attribute is not set, so we pull the
    errno out of the args; but if someone instantiates an Exception without
    any args you would get a tuple error. This function abstracts all of
    that behavior to give you a safe way to get the errno.
"""
if hasattr(e, 'errno'):
return e.errno # type: ignore
elif e.args:
return e.args[0]
else:
return None
_alphanum = frozenset(
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")
def _re_unescape_replacement(match):
# type: (Match[str]) -> str
group = match.group(1)
if group[0] in _alphanum:
raise ValueError("cannot unescape '\\\\%s'" % group[0])
return group
_re_unescape_pattern = re.compile(r'\\(.)', re.DOTALL)
def re_unescape(s):
# type: (str) -> str
"""Unescape a string escaped by `re.escape`.
May raise ``ValueError`` for regular expressions which could not
have been produced by `re.escape` (for example, strings containing
``\d`` cannot be unescaped).
.. versionadded:: 4.4
"""
return _re_unescape_pattern.sub(_re_unescape_replacement, s)
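# Usage sketch (illustrative only, not part of the original module):
# ``re_unescape`` is the inverse of ``re.escape`` for escapable characters.
#
#     >>> re_unescape(re.escape('1+2*3'))
#     '1+2*3'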
class Configurable(object):
"""Base class for configurable interfaces.
A configurable interface is an (abstract) class whose constructor
acts as a factory function for one of its implementation subclasses.
The implementation subclass as well as optional keyword arguments to
its initializer can be set globally at runtime with `configure`.
By using the constructor as the factory method, the interface
looks like a normal class, `isinstance` works as usual, etc. This
pattern is most useful when the choice of implementation is likely
to be a global decision (e.g. when `~select.epoll` is available,
always use it instead of `~select.select`), or when a
previously-monolithic class has been split into specialized
subclasses.
Configurable subclasses must define the class methods
`configurable_base` and `configurable_default`, and use the instance
method `initialize` instead of ``__init__``.
"""
__impl_class = None # type: type
__impl_kwargs = None # type: Dict[str, Any]
def __new__(cls, *args, **kwargs):
base = cls.configurable_base()
init_kwargs = {}
if cls is base:
impl = cls.configured_class()
if base.__impl_kwargs:
init_kwargs.update(base.__impl_kwargs)
else:
impl = cls
init_kwargs.update(kwargs)
instance = super(Configurable, cls).__new__(impl)
# initialize vs __init__ chosen for compatibility with AsyncHTTPClient
# singleton magic. If we get rid of that we can switch to __init__
# here too.
instance.initialize(*args, **init_kwargs)
return instance
@classmethod
def configurable_base(cls):
# type: () -> Any
# TODO: This class needs https://github.com/python/typing/issues/107
# to be fully typeable.
"""Returns the base class of a configurable hierarchy.
        This will normally return the class in which it is defined
        (which is *not* necessarily the same as the ``cls`` classmethod parameter).
"""
raise NotImplementedError()
@classmethod
def configurable_default(cls):
# type: () -> type
"""Returns the implementation class to be used if none is configured."""
raise NotImplementedError()
def initialize(self):
# type: () -> None
"""Initialize a `Configurable` subclass instance.
Configurable classes should use `initialize` instead of ``__init__``.
.. versionchanged:: 4.2
Now accepts positional arguments in addition to keyword arguments.
"""
@classmethod
def configure(cls, impl, **kwargs):
# type: (Any, **Any) -> None
"""Sets the class to use when the base class is instantiated.
Keyword arguments will be saved and added to the arguments passed
to the constructor. This can be used to set global defaults for
some parameters.
"""
base = cls.configurable_base()
if isinstance(impl, (str, unicode_type)):
impl = import_object(impl)
if impl is not None and not issubclass(impl, cls):
raise ValueError("Invalid subclass of %s" % cls)
base.__impl_class = impl
base.__impl_kwargs = kwargs
@classmethod
def configured_class(cls):
# type: () -> type
"""Returns the currently configured class."""
base = cls.configurable_base()
if cls.__impl_class is None:
base.__impl_class = cls.configurable_default()
return base.__impl_class
@classmethod
def _save_configuration(cls):
# type: () -> Tuple[type, Dict[str, Any]]
base = cls.configurable_base()
return (base.__impl_class, base.__impl_kwargs)
@classmethod
def _restore_configuration(cls, saved):
# type: (Tuple[type, Dict[str, Any]]) -> None
base = cls.configurable_base()
base.__impl_class = saved[0]
base.__impl_kwargs = saved[1]
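# Usage sketch (illustrative only, not part of the original module): a minimal
# configurable hierarchy with hypothetical names.
#
#     class Greeter(Configurable):
#         @classmethod
#         def configurable_base(cls):
#             return Greeter
#
#         @classmethod
#         def configurable_default(cls):
#             return PlainGreeter
#
#         def initialize(self, greeting='hello'):
#             self.greeting = greeting
#
#     class PlainGreeter(Greeter):
#         pass
#
#     Greeter.configure(PlainGreeter, greeting='hi')
#     g = Greeter()  # constructs a PlainGreeter initialized with greeting='hi'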
class ArgReplacer(object):
"""Replaces one value in an ``args, kwargs`` pair.
Inspects the function signature to find an argument by name
whether it is passed by position or keyword. For use in decorators
and similar wrappers.
"""
def __init__(self, func, name):
# type: (Callable, str) -> None
self.name = name
try:
self.arg_pos = self._getargnames(func).index(name)
except ValueError:
# Not a positional parameter
self.arg_pos = None
def _getargnames(self, func):
# type: (Callable) -> List[str]
try:
return getargspec(func).args
except TypeError:
if hasattr(func, 'func_code'):
# Cython-generated code has all the attributes needed
# by inspect.getargspec, but the inspect module only
# works with ordinary functions. Inline the portion of
# getargspec that we need here. Note that for static
# functions the @cython.binding(True) decorator must
# be used (for methods it works out of the box).
code = func.func_code # type: ignore
return code.co_varnames[:code.co_argcount]
raise
def get_old_value(self, args, kwargs, default=None):
# type: (List[Any], Dict[str, Any], Any) -> Any
"""Returns the old value of the named argument without replacing it.
Returns ``default`` if the argument is not present.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
return args[self.arg_pos]
else:
return kwargs.get(self.name, default)
def replace(self, new_value, args, kwargs):
# type: (Any, List[Any], Dict[str, Any]) -> Tuple[Any, List[Any], Dict[str, Any]]
"""Replace the named argument in ``args, kwargs`` with ``new_value``.
Returns ``(old_value, args, kwargs)``. The returned ``args`` and
``kwargs`` objects may not be the same as the input objects, or
the input objects may be mutated.
If the named argument was not found, ``new_value`` will be added
to ``kwargs`` and None will be returned as ``old_value``.
"""
if self.arg_pos is not None and len(args) > self.arg_pos:
# The arg to replace is passed positionally
old_value = args[self.arg_pos]
args = list(args) # *args is normally a tuple
args[self.arg_pos] = new_value
else:
# The arg to replace is either omitted or passed by keyword.
old_value = kwargs.get(self.name)
kwargs[self.name] = new_value
return old_value, args, kwargs
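# Usage sketch (illustrative only, not part of the original module); ``connect``
# and ``new_callback`` are hypothetical:
#
#     def connect(host, port, callback=None):
#         pass
#
#     replacer = ArgReplacer(connect, 'callback')
#     old, args, kwargs = replacer.replace(new_callback, ['example.com', 80], {})
#     # ``callback`` was not passed positionally or by keyword, so ``old`` is
#     # None and ``new_callback`` is now present in ``kwargs``.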
def timedelta_to_seconds(td):
# type: (datetime.timedelta) -> float
"""Equivalent to td.total_seconds() (introduced in python 2.7)."""
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6)
def _websocket_mask_python(mask, data):
# type: (bytes, bytes) -> bytes
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask_arr = array.array("B", mask)
unmasked_arr = array.array("B", data)
for i in xrange(len(data)):
unmasked_arr[i] = unmasked_arr[i] ^ mask_arr[i % 4]
if PY3:
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked_arr.tobytes()
else:
return unmasked_arr.tostring()
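# Illustrative check (not part of the original module): XOR masking is an
# involution, so applying the same 4-byte mask twice restores the payload.
#
#     masked = _websocket_mask_python(b'abcd', b'payload')
#     assert _websocket_mask_python(b'abcd', masked) == b'payload'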
if (os.environ.get('TORNADO_NO_EXTENSION') or
os.environ.get('TORNADO_EXTENSION') == '0'):
# These environment variables exist to make it easier to do performance
# comparisons; they are not guaranteed to remain supported in the future.
_websocket_mask = _websocket_mask_python
else:
try:
from tornado.speedups import websocket_mask as _websocket_mask
except ImportError:
if os.environ.get('TORNADO_EXTENSION') == '1':
raise
_websocket_mask = _websocket_mask_python
def doctests():
import doctest
return doctest.DocTestSuite()
| {
"content_hash": "fd4ce7b0d90c7cbb727a5430c827d903",
"timestamp": "",
"source": "github",
"line_count": 451,
"max_line_length": 97,
"avg_line_length": 34.368070953436806,
"alnum_prop": 0.6360645161290323,
"repo_name": "zguangyu/tornado",
"id": "28e74e7dc0e3be3563b5868f34bcdc294be45b50",
"size": "15500",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tornado/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1078"
},
{
"name": "CSS",
"bytes": "7736"
},
{
"name": "HTML",
"bytes": "12434"
},
{
"name": "JavaScript",
"bytes": "6088"
},
{
"name": "Python",
"bytes": "1538229"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
} |
import os
import re
import subprocess
import sys
import unittest
import sysconfig
from test.test_support import run_unittest, findfile
try:
gdb_version, _ = subprocess.Popen(["gdb", "--version"],
stdout=subprocess.PIPE).communicate()
except OSError:
# This is what "no gdb" looks like. There may, however, be other
# errors that manifest this way too.
raise unittest.SkipTest("Couldn't find gdb on the path")
gdb_version_number = re.search(r"^GNU gdb [^\d]*(\d+)\.", gdb_version)
if int(gdb_version_number.group(1)) < 7:
    raise unittest.SkipTest("gdb versions before 7.0 didn't support python embedding."
" Saw:\n" + gdb_version)
# Verify that "gdb" was built with the embedded python support enabled:
cmd = "--eval-command=python import sys; print sys.version_info"
p = subprocess.Popen(["gdb", "--batch", cmd],
stdout=subprocess.PIPE)
gdbpy_version, _ = p.communicate()
if gdbpy_version == '':
raise unittest.SkipTest("gdb not built with embedded python support")
def gdb_has_frame_select():
# Does this build of gdb have gdb.Frame.select ?
cmd = "--eval-command=python print(dir(gdb.Frame))"
p = subprocess.Popen(["gdb", "--batch", cmd],
stdout=subprocess.PIPE)
stdout, _ = p.communicate()
m = re.match(r'.*\[(.*)\].*', stdout)
if not m:
raise unittest.SkipTest("Unable to parse output from gdb.Frame.select test")
gdb_frame_dir = m.group(1).split(', ')
return "'select'" in gdb_frame_dir
HAS_PYUP_PYDOWN = gdb_has_frame_select()
class DebuggerTests(unittest.TestCase):
"""Test that the debugger can debug Python."""
def run_gdb(self, *args):
"""Runs gdb with the command line given by *args.
Returns its stdout, stderr
"""
out, err = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
).communicate()
return out, err
def get_stack_trace(self, source=None, script=None,
breakpoint='PyObject_Print',
cmds_after_breakpoint=None,
import_site=False):
'''
Run 'python -c SOURCE' under gdb with a breakpoint.
        Supports injecting commands after the breakpoint is reached.
        Returns the stdout from gdb.
cmds_after_breakpoint: if provided, a list of strings: gdb commands
'''
# We use "set breakpoint pending yes" to avoid blocking with a:
# Function "foo" not defined.
# Make breakpoint pending on future shared library load? (y or [n])
        # error, which typically happens when python is dynamically linked (the
# breakpoints of interest are to be found in the shared library)
# When this happens, we still get:
# Function "PyObject_Print" not defined.
# emitted to stderr each time, alas.
# Initially I had "--eval-command=continue" here, but removed it to
# avoid repeated print breakpoints when traversing hierarchical data
# structures
# Generate a list of commands in gdb's language:
commands = ['set breakpoint pending yes',
'break %s' % breakpoint,
'run']
if cmds_after_breakpoint:
commands += cmds_after_breakpoint
else:
commands += ['backtrace']
# print commands
# Use "commands" to generate the arguments with which to invoke "gdb":
args = ["gdb", "--batch"]
args += ['--eval-command=%s' % cmd for cmd in commands]
args += ["--args",
sys.executable]
if not import_site:
# -S suppresses the default 'import site'
args += ["-S"]
if source:
args += ["-c", source]
elif script:
args += [script]
# print args
# print ' '.join(args)
# Use "args" to invoke gdb, capturing stdout, stderr:
out, err = self.run_gdb(*args)
# Ignore some noise on stderr due to the pending breakpoint:
err = err.replace('Function "%s" not defined.\n' % breakpoint, '')
# Ignore some other noise on stderr (http://bugs.python.org/issue8600)
err = err.replace("warning: Unable to find libthread_db matching"
" inferior's thread library, thread debugging will"
" not be available.\n",
'')
# Ensure no unexpected error messages:
self.assertEqual(err, '')
return out
def get_gdb_repr(self, source,
cmds_after_breakpoint=None,
import_site=False):
# Given an input python source representation of data,
# run "python -c'print DATA'" under gdb with a breakpoint on
# PyObject_Print and scrape out gdb's representation of the "op"
# parameter, and verify that the gdb displays the same string
#
# For a nested structure, the first time we hit the breakpoint will
# give us the top-level structure
gdb_output = self.get_stack_trace(source, breakpoint='PyObject_Print',
cmds_after_breakpoint=cmds_after_breakpoint,
import_site=import_site)
# gdb can insert additional '\n' and space characters in various places
# in its output, depending on the width of the terminal it's connected
# to (using its "wrap_here" function)
m = re.match('.*#0\s+PyObject_Print\s+\(\s*op\=\s*(.*?),\s+fp=.*\).*',
gdb_output, re.DOTALL)
if not m:
self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output))
return m.group(1), gdb_output
def assertEndsWith(self, actual, exp_end):
'''Ensure that the given "actual" string ends with "exp_end"'''
self.assertTrue(actual.endswith(exp_end),
msg='%r did not end with %r' % (actual, exp_end))
def assertMultilineMatches(self, actual, pattern):
m = re.match(pattern, actual, re.DOTALL)
self.assertTrue(m, msg='%r did not match %r' % (actual, pattern))
def get_sample_script(self):
return findfile('gdb_sample.py')
class PrettyPrintTests(DebuggerTests):
def test_getting_backtrace(self):
gdb_output = self.get_stack_trace('print 42')
self.assertTrue('PyObject_Print' in gdb_output)
def assertGdbRepr(self, val, cmds_after_breakpoint=None):
# Ensure that gdb's rendering of the value in a debugged process
# matches repr(value) in this process:
gdb_repr, gdb_output = self.get_gdb_repr('print ' + repr(val),
cmds_after_breakpoint)
self.assertEqual(gdb_repr, repr(val), gdb_output)
def test_int(self):
'Verify the pretty-printing of various "int" values'
self.assertGdbRepr(42)
self.assertGdbRepr(0)
self.assertGdbRepr(-7)
self.assertGdbRepr(sys.maxint)
self.assertGdbRepr(-sys.maxint)
def test_long(self):
'Verify the pretty-printing of various "long" values'
self.assertGdbRepr(0L)
self.assertGdbRepr(1000000000000L)
self.assertGdbRepr(-1L)
self.assertGdbRepr(-1000000000000000L)
def test_singletons(self):
'Verify the pretty-printing of True, False and None'
self.assertGdbRepr(True)
self.assertGdbRepr(False)
self.assertGdbRepr(None)
def test_dicts(self):
'Verify the pretty-printing of dictionaries'
self.assertGdbRepr({})
self.assertGdbRepr({'foo': 'bar'})
self.assertGdbRepr({'foo': 'bar', 'douglas':42})
def test_lists(self):
'Verify the pretty-printing of lists'
self.assertGdbRepr([])
self.assertGdbRepr(range(5))
def test_strings(self):
'Verify the pretty-printing of strings'
self.assertGdbRepr('')
self.assertGdbRepr('And now for something hopefully the same')
self.assertGdbRepr('string with embedded NUL here \0 and then some more text')
self.assertGdbRepr('this is byte 255:\xff and byte 128:\x80')
def test_tuples(self):
'Verify the pretty-printing of tuples'
self.assertGdbRepr(tuple())
self.assertGdbRepr((1,))
self.assertGdbRepr(('foo', 'bar', 'baz'))
def test_unicode(self):
'Verify the pretty-printing of unicode values'
# Test the empty unicode string:
self.assertGdbRepr(u'')
self.assertGdbRepr(u'hello world')
# Test printing a single character:
# U+2620 SKULL AND CROSSBONES
self.assertGdbRepr(u'\u2620')
# Test printing a Japanese unicode string
# (I believe this reads "mojibake", using 3 characters from the CJK
# Unified Ideographs area, followed by U+3051 HIRAGANA LETTER KE)
self.assertGdbRepr(u'\u6587\u5b57\u5316\u3051')
# Test a character outside the BMP:
# U+1D121 MUSICAL SYMBOL C CLEF
# This is:
# UTF-8: 0xF0 0x9D 0x84 0xA1
# UTF-16: 0xD834 0xDD21
# This will only work on wide-unicode builds:
self.assertGdbRepr(u"\U0001D121")
def test_sets(self):
'Verify the pretty-printing of sets'
self.assertGdbRepr(set())
self.assertGdbRepr(set(['a', 'b']))
self.assertGdbRepr(set([4, 5, 6]))
# Ensure that we handled sets containing the "dummy" key value,
# which happens on deletion:
gdb_repr, gdb_output = self.get_gdb_repr('''s = set(['a','b'])
s.pop()
print s''')
self.assertEqual(gdb_repr, "set(['b'])")
def test_frozensets(self):
'Verify the pretty-printing of frozensets'
self.assertGdbRepr(frozenset())
self.assertGdbRepr(frozenset(['a', 'b']))
self.assertGdbRepr(frozenset([4, 5, 6]))
def test_exceptions(self):
# Test a RuntimeError
gdb_repr, gdb_output = self.get_gdb_repr('''
try:
raise RuntimeError("I am an error")
except RuntimeError, e:
print e
''')
self.assertEqual(gdb_repr,
"exceptions.RuntimeError('I am an error',)")
# Test division by zero:
gdb_repr, gdb_output = self.get_gdb_repr('''
try:
a = 1 / 0
except ZeroDivisionError, e:
print e
''')
self.assertEqual(gdb_repr,
"exceptions.ZeroDivisionError('integer division or modulo by zero',)")
def test_classic_class(self):
'Verify the pretty-printing of classic class instances'
gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo:
pass
foo = Foo()
foo.an_int = 42
print foo''')
m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected classic-class rendering %r' % gdb_repr)
def test_modern_class(self):
'Verify the pretty-printing of new-style class instances'
gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(object):
pass
foo = Foo()
foo.an_int = 42
print foo''')
m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected new-style class rendering %r' % gdb_repr)
def test_subclassing_list(self):
'Verify the pretty-printing of an instance of a list subclass'
gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(list):
pass
foo = Foo()
foo += [1, 2, 3]
foo.an_int = 42
print foo''')
m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected new-style class rendering %r' % gdb_repr)
def test_subclassing_tuple(self):
'Verify the pretty-printing of an instance of a tuple subclass'
# This should exercise the negative tp_dictoffset code in the
# new-style class support
gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(tuple):
pass
foo = Foo((1, 2, 3))
foo.an_int = 42
print foo''')
m = re.match(r'<Foo\(an_int=42\) at remote 0x[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected new-style class rendering %r' % gdb_repr)
def assertSane(self, source, corruption, expvalue=None, exptype=None):
'''Run Python under gdb, corrupting variables in the inferior process
immediately before taking a backtrace.
Verify that the variable's representation is the expected failsafe
representation'''
if corruption:
cmds_after_breakpoint=[corruption, 'backtrace']
else:
cmds_after_breakpoint=['backtrace']
gdb_repr, gdb_output = \
self.get_gdb_repr(source,
cmds_after_breakpoint=cmds_after_breakpoint)
if expvalue:
if gdb_repr == repr(expvalue):
# gdb managed to print the value in spite of the corruption;
# this is good (see http://bugs.python.org/issue8330)
return
if exptype:
pattern = '<' + exptype + ' at remote 0x[0-9a-f]+>'
else:
# Match anything for the type name; 0xDEADBEEF could point to
# something arbitrary (see http://bugs.python.org/issue8330)
pattern = '<.* at remote 0x[0-9a-f]+>'
m = re.match(pattern, gdb_repr)
if not m:
self.fail('Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_NULL_ptr(self):
'Ensure that a NULL PyObject* is handled gracefully'
gdb_repr, gdb_output = (
self.get_gdb_repr('print 42',
cmds_after_breakpoint=['set variable op=0',
'backtrace'])
)
self.assertEqual(gdb_repr, '0x0')
def test_NULL_ob_type(self):
'Ensure that a PyObject* with NULL ob_type is handled gracefully'
self.assertSane('print 42',
'set op->ob_type=0')
def test_corrupt_ob_type(self):
'Ensure that a PyObject* with a corrupt ob_type is handled gracefully'
self.assertSane('print 42',
'set op->ob_type=0xDEADBEEF',
expvalue=42)
def test_corrupt_tp_flags(self):
'Ensure that a PyObject* with a type with corrupt tp_flags is handled'
self.assertSane('print 42',
'set op->ob_type->tp_flags=0x0',
expvalue=42)
def test_corrupt_tp_name(self):
'Ensure that a PyObject* with a type with corrupt tp_name is handled'
self.assertSane('print 42',
'set op->ob_type->tp_name=0xDEADBEEF',
expvalue=42)
def test_NULL_instance_dict(self):
        'Ensure that a PyInstanceObject with a NULL in_dict is handled'
self.assertSane('''
class Foo:
pass
foo = Foo()
foo.an_int = 42
print foo''',
'set ((PyInstanceObject*)op)->in_dict = 0',
exptype='Foo')
def test_builtins_help(self):
'Ensure that the new-style class _Helper in site.py can be handled'
# (this was the issue causing tracebacks in
# http://bugs.python.org/issue8032#msg100537 )
gdb_repr, gdb_output = self.get_gdb_repr('print __builtins__.help', import_site=True)
m = re.match(r'<_Helper at remote 0x[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected rendering %r' % gdb_repr)
def test_selfreferential_list(self):
'''Ensure that a reference loop involving a list doesn't lead proxyval
into an infinite loop:'''
gdb_repr, gdb_output = \
self.get_gdb_repr("a = [3, 4, 5] ; a.append(a) ; print a")
self.assertEqual(gdb_repr, '[3, 4, 5, [...]]')
gdb_repr, gdb_output = \
self.get_gdb_repr("a = [3, 4, 5] ; b = [a] ; a.append(b) ; print a")
self.assertEqual(gdb_repr, '[3, 4, 5, [[...]]]')
def test_selfreferential_dict(self):
'''Ensure that a reference loop involving a dict doesn't lead proxyval
into an infinite loop:'''
gdb_repr, gdb_output = \
self.get_gdb_repr("a = {} ; b = {'bar':a} ; a['foo'] = b ; print a")
self.assertEqual(gdb_repr, "{'foo': {'bar': {...}}}")
def test_selfreferential_old_style_instance(self):
gdb_repr, gdb_output = \
self.get_gdb_repr('''
class Foo:
pass
foo = Foo()
foo.an_attr = foo
print foo''')
self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_selfreferential_new_style_instance(self):
gdb_repr, gdb_output = \
self.get_gdb_repr('''
class Foo(object):
pass
foo = Foo()
foo.an_attr = foo
print foo''')
self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
gdb_repr, gdb_output = \
self.get_gdb_repr('''
class Foo(object):
pass
a = Foo()
b = Foo()
a.an_attr = b
b.an_attr = a
print a''')
self.assertTrue(re.match('<Foo\(an_attr=<Foo\(an_attr=<\.\.\.>\) at remote 0x[0-9a-f]+>\) at remote 0x[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_truncation(self):
'Verify that very long output is truncated'
gdb_repr, gdb_output = self.get_gdb_repr('print range(1000)')
self.assertEqual(gdb_repr,
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "
"14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, "
"27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, "
"40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, "
"53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, "
"66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, "
"79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, "
"92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, "
"104, 105, 106, 107, 108, 109, 110, 111, 112, 113, "
"114, 115, 116, 117, 118, 119, 120, 121, 122, 123, "
"124, 125, 126, 127, 128, 129, 130, 131, 132, 133, "
"134, 135, 136, 137, 138, 139, 140, 141, 142, 143, "
"144, 145, 146, 147, 148, 149, 150, 151, 152, 153, "
"154, 155, 156, 157, 158, 159, 160, 161, 162, 163, "
"164, 165, 166, 167, 168, 169, 170, 171, 172, 173, "
"174, 175, 176, 177, 178, 179, 180, 181, 182, 183, "
"184, 185, 186, 187, 188, 189, 190, 191, 192, 193, "
"194, 195, 196, 197, 198, 199, 200, 201, 202, 203, "
"204, 205, 206, 207, 208, 209, 210, 211, 212, 213, "
"214, 215, 216, 217, 218, 219, 220, 221, 222, 223, "
"224, 225, 226...(truncated)")
self.assertEqual(len(gdb_repr),
1024 + len('...(truncated)'))
def test_builtin_function(self):
gdb_repr, gdb_output = self.get_gdb_repr('print len')
self.assertEqual(gdb_repr, '<built-in function len>')
def test_builtin_method(self):
gdb_repr, gdb_output = self.get_gdb_repr('import sys; print sys.stdout.readlines')
self.assertTrue(re.match('<built-in method readlines of file object at remote 0x[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_frames(self):
gdb_output = self.get_stack_trace('''
def foo(a, b, c):
pass
foo(3, 4, 5)
print foo.__code__''',
breakpoint='PyObject_Print',
cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)op)->co_zombieframe)']
)
self.assertTrue(re.match(r'.*\s+\$1 =\s+Frame 0x[0-9a-f]+, for file <string>, line 3, in foo \(\)\s+.*',
gdb_output,
re.DOTALL),
'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output))
class PyListTests(DebuggerTests):
def assertListing(self, expected, actual):
self.assertEndsWith(actual, expected)
def test_basic_command(self):
'Verify that the "py-list" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-list'])
self.assertListing(' 5 \n'
' 6 def bar(a, b, c):\n'
' 7 baz(a, b, c)\n'
' 8 \n'
' 9 def baz(*args):\n'
' >10 print(42)\n'
' 11 \n'
' 12 foo(1, 2, 3)\n',
bt)
def test_one_abs_arg(self):
'Verify the "py-list" command with one absolute argument'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-list 9'])
self.assertListing(' 9 def baz(*args):\n'
' >10 print(42)\n'
' 11 \n'
' 12 foo(1, 2, 3)\n',
bt)
def test_two_abs_args(self):
'Verify the "py-list" command with two absolute arguments'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-list 1,3'])
self.assertListing(' 1 # Sample script for use by test_gdb.py\n'
' 2 \n'
' 3 def foo(a, b, c):\n',
bt)
class StackNavigationTests(DebuggerTests):
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_pyup_command(self):
'Verify that the "py-up" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up'])
self.assertMultilineMatches(bt,
r'''^.*
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
baz\(a, b, c\)
$''')
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_down_at_bottom(self):
'Verify handling of "py-down" at the bottom of the stack'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-down'])
self.assertEndsWith(bt,
'Unable to find a newer python frame\n')
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_up_at_top(self):
'Verify handling of "py-up" at the top of the stack'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up'] * 4)
self.assertEndsWith(bt,
'Unable to find an older python frame\n')
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_up_then_down(self):
'Verify "py-up" followed by "py-down"'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up', 'py-down'])
self.assertMultilineMatches(bt,
r'''^.*
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
baz\(a, b, c\)
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 10, in baz \(args=\(1, 2, 3\)\)
print\(42\)
$''')
class PyBtTests(DebuggerTests):
def test_basic_command(self):
'Verify that the "py-bt" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-bt'])
self.assertMultilineMatches(bt,
r'''^.*
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
baz\(a, b, c\)
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 4, in foo \(a=1, b=2, c=3\)
bar\(a, b, c\)
#[0-9]+ Frame 0x[0-9a-f]+, for file .*gdb_sample.py, line 12, in <module> \(\)
foo\(1, 2, 3\)
''')
class PyPrintTests(DebuggerTests):
def test_basic_command(self):
'Verify that the "py-print" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-print args'])
self.assertMultilineMatches(bt,
r".*\nlocal 'args' = \(1, 2, 3\)\n.*")
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_print_after_up(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up', 'py-print c', 'py-print b', 'py-print a'])
self.assertMultilineMatches(bt,
r".*\nlocal 'c' = 3\nlocal 'b' = 2\nlocal 'a' = 1\n.*")
def test_printing_global(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-print __name__'])
self.assertMultilineMatches(bt,
r".*\nglobal '__name__' = '__main__'\n.*")
def test_printing_builtin(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-print len'])
self.assertMultilineMatches(bt,
r".*\nbuiltin 'len' = <built-in function len>\n.*")
class PyLocalsTests(DebuggerTests):
def test_basic_command(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-locals'])
self.assertMultilineMatches(bt,
r".*\nargs = \(1, 2, 3\)\n.*")
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_locals_after_up(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up', 'py-locals'])
self.assertMultilineMatches(bt,
r".*\na = 1\nb = 2\nc = 3\n.*")
def test_main():
cflags = sysconfig.get_config_vars()['PY_CFLAGS']
final_opt = ""
for opt in cflags.split():
if opt.startswith('-O'):
final_opt = opt
if final_opt and final_opt != '-O0':
raise unittest.SkipTest("Python was built with compiler optimizations, "
"tests can't reliably succeed")
run_unittest(PrettyPrintTests,
PyListTests,
StackNavigationTests,
PyBtTests,
PyPrintTests,
PyLocalsTests
)
if __name__ == "__main__":
test_main()
| {
"content_hash": "8d8841edd55be17780a1a799cef37bb7",
"timestamp": "",
"source": "github",
"line_count": 694,
"max_line_length": 127,
"avg_line_length": 40.46974063400577,
"alnum_prop": 0.5400199387595244,
"repo_name": "xxd3vin/spp-sdk",
"id": "e033dcf1fd93d1d288187e3e4d4294f2ef31bca1",
"size": "28257",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "opt/Python27/Lib/test/test_gdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "759663"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "56155"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "3065"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "JavaScript",
"bytes": "163687"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Pascal",
"bytes": "8738"
},
{
"name": "Python",
"bytes": "22177886"
},
{
"name": "Shell",
"bytes": "15704"
},
{
"name": "Tcl",
"bytes": "2065501"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import binascii
import datetime
import json
import os
import shutil
import tempfile
import time
import urllib
import unittest
import requests
import rsa
from rsa import key
from testfixtures import Replacer
from nexus import token_utils
class TestTokenUtils(unittest.TestCase):
def setUp(self):
self.replacer = Replacer()
def tearDown(self):
self.replacer.restore()
def test_validate(self):
expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=5)
expiry = int(time.mktime(expiry.timetuple()))
cert_url = 'http://tester.com/cert1'
expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=5)
unsigned_token = "un=test|expiry={0}|SigningSubject={1}|expiry={2}".format(expiry,
cert_url, time.mktime(expires.timetuple()))
pub_key, priv_key = key.newkeys(1024)
sig = rsa.sign(unsigned_token, priv_key, 'SHA-256')
tmp_dir = tempfile.mkdtemp()
os.environ['NEXUS_CACHE_PATH'] = tmp_dir
encoded_sig = binascii.hexlify(sig)
signed_token = "{0}|sig={1}".format(unsigned_token,
encoded_sig)
response = requests.Response()
response._content = json.dumps({'pubkey':pub_key.save_pkcs1()})
self.replacer.replace('requests.get', lambda *args, **kwargs: response)
token_utils.validate_token(signed_token)
shutil.rmtree(tmp_dir)
def test_request_access_token(self):
response = requests.Response()
response.status_code = requests.codes.ok
access_token = {
"access_token": "faohwefadfawaw",
"refresh_token": "fhajhkjbhrafw",
"expires_in": 123456789,
"token_type": "Bearer"
}
response._content = json.dumps(access_token)
self.replacer.replace('requests.post', lambda *args, **kwargs: response)
token_map = token_utils.request_access_token('myid',
'mysecret','theauthcode', 'http://oauth.org/2/authorize')
self.assertEqual('faohwefadfawaw', token_map['access_token'])
self.assertEqual('faohwefadfawaw', token_map.access_token)
self.assertEqual('fhajhkjbhrafw', token_map['refresh_token'])
self.assertEqual('fhajhkjbhrafw', token_map.refresh_token)
self.assertEqual(123456789, token_map['expires_in'])
self.assertEqual(123456789, token_map.expires_in)
self.assertEqual('Bearer', token_map['token_type'])
self.assertEqual('Bearer', token_map.token_type)
def test_request_access_token_failure(self):
error_message = {
'error_reason': 'authorization not given'
}
response = requests.Response()
response.status_code = requests.codes.conflict
response._content = json.dumps(error_message)
self.replacer.replace('requests.post', lambda *args, **kwargs: response)
call = lambda: token_utils.request_access_token('myid',
'mysecret','theauthcode', 'http://oauth.org/2/authorize')
self.assertRaises(token_utils.TokenRequestError, call)
| {
"content_hash": "866cebc140ca585a97ca855c016c6587",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 90,
"avg_line_length": 37.03488372093023,
"alnum_prop": 0.6383045525902669,
"repo_name": "kbase/auth_service",
"id": "4901f376295e87c9695bc677ffdee532cb3cf24f",
"size": "3185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-libs/python-nexus-client/lib/tests/token_util_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "8892"
},
{
"name": "Perl",
"bytes": "3551"
},
{
"name": "Python",
"bytes": "187138"
},
{
"name": "Shell",
"bytes": "932"
}
],
"symlink_target": ""
} |
class Solution(object):
def flatten(self, root):
"""
:type root: TreeNode
:rtype: void Do not return anything, modify root in-place instead.
"""
        if root is None:
return
self.flatten(root.left)
self.flatten(root.right)
leftTail = root.left
        if leftTail is not None:
            while leftTail.right is not None:
leftTail = leftTail.right
leftTail.right = root.right
root.right = root.left
root.left = None
return
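# Illustrative trace (not part of the original solution): flattening follows
# preorder, so the tree
#
#         1
#        / \
#       2   5
#      / \   \
#     3   4   6
#
# becomes the right-skewed list 1 -> 2 -> 3 -> 4 -> 5 -> 6.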
| {
"content_hash": "8fac08657ebf199286c0a15262f0383e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 30.61111111111111,
"alnum_prop": 0.5263157894736842,
"repo_name": "hawkphantomnet/leetcode",
"id": "7996228ecb07d097ddaacebeb40bc1aa9bb25ce6",
"size": "722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FlattenBinaryTreeToLinkedList/Solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158603"
},
{
"name": "C++",
"bytes": "258279"
},
{
"name": "Java",
"bytes": "234161"
},
{
"name": "JavaScript",
"bytes": "122393"
},
{
"name": "Python",
"bytes": "143293"
},
{
"name": "Ruby",
"bytes": "88479"
},
{
"name": "Shell",
"bytes": "791"
},
{
"name": "Swift",
"bytes": "554"
}
],
"symlink_target": ""
} |
from cumulusci.tasks.salesforce import BaseSalesforceApiTask
class ConfigureCAD(BaseSalesforceApiTask):
def _run_task(self):
self.sf.CurrencyType.create({
'IsoCode': 'CAD',
'IsCorporate': False,
'IsActive': True,
'DecimalPlaces': 2,
'ConversionRate': 1.3,
})
| {
"content_hash": "24b10bbf47383ed990d79642a3f1bd77",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 28.416666666666668,
"alnum_prop": 0.5835777126099707,
"repo_name": "Zosoled/Cumulus",
"id": "aa42d8c037438ab183ad7acbdb14d777c675dc17",
"size": "341",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tasks/multicurrency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Apex",
"bytes": "7308585"
},
{
"name": "CSS",
"bytes": "28528"
},
{
"name": "HTML",
"bytes": "207879"
},
{
"name": "JavaScript",
"bytes": "457358"
},
{
"name": "Python",
"bytes": "92117"
},
{
"name": "RobotFramework",
"bytes": "114253"
},
{
"name": "Shell",
"bytes": "2579"
},
{
"name": "TeX",
"bytes": "2663"
}
],
"symlink_target": ""
} |
name = 'hdcycles'
version = '0.15.0'
authors = [
'benjamin.skinner',
]
requires = [
'cycles-1.13.0-ta.1.14.7',
]
variants = [
['platform-windows', 'arch-x64', 'os-windows-10', 'usd-20.05-ta.1.2'],
['platform-windows', 'arch-x64', 'os-windows-10', 'usd-20.11'],
['platform-windows', 'arch-x64', 'os-windows-10', 'houdini-18.5'],
['platform-linux', 'arch-x86_64', 'os-centos-7', 'houdini-18.5'],
]
build_system = "cmake"
# At Tangent rez-release is external by default,
# this forces a rez-release as an internal package
with scope("config") as c:
import sys
if 'win' in str(sys.platform):
c.release_packages_path = "R:/int"
else:
c.release_packages_path = "/r/int"
# At Tangent we have a visual studio package which
# exposes the visual studio compiler for rez.
@early()
def private_build_requires():
import sys
if 'win' in str(sys.platform):
return ['cmake-3.18<3.20', 'visual_studio', 'Jinja2']
else:
return ['cmake-3.18<3.20', 'gcc-6']
# Pass along rez version to cmake build
def pre_build_commands():
env.HDCYCLES_BUILD_VERSION_MAJOR.set(this.version.major)
env.HDCYCLES_BUILD_VERSION_MINOR.set(this.version.minor)
env.HDCYCLES_BUILD_VERSION_PATCH.set(this.version.patch)
env.HDCYCLES_BUILD_VERSION.set(str(this.version))
# Main commands for rez build and environment
def commands():
env.HDCYCLES_ROOT.set('{root}')
env.HDCYCLES_PLUGIN_ROOT.set('{root}/plugin')
env.HDCYCLES_TOOLS_ROOT.set('{root}/tools')
env.PXR_PLUGINPATH_NAME.append('{root}/plugin/usd/ndrCycles/resources')
env.PXR_PLUGINPATH_NAME.append('{root}/plugin/usd/usdCycles/resources')
env.PXR_PLUGINPATH_NAME.append('{root}/plugin/usd/hdCycles/resources')
env.PYTHONPATH.prepend('{root}/plugin/python')
# required on windows
env.PATH.append('{root}/plugin/usd')
# For houdini_cycles to locate the schema
env.USD_CYCLES_GENERATED_SCHEMA.set('{root}/plugin/usd/usdCycles/resources/generatedSchema.usda')
| {
"content_hash": "66c2282f7f12c5975b50db6612f8caf8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 101,
"avg_line_length": 31.29230769230769,
"alnum_prop": 0.6696165191740413,
"repo_name": "tangent-opensource/hdBlackbird",
"id": "ac5c9c60e5026b30ad51a385714a0aebe49373fd",
"size": "2059",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "84087"
},
{
"name": "C++",
"bytes": "680266"
},
{
"name": "CMake",
"bytes": "25813"
},
{
"name": "Python",
"bytes": "3205"
}
],
"symlink_target": ""
} |
"""Example workflow using all the methods in the Productstatuses service."""
from __future__ import absolute_import
from __future__ import print_function
import json
import sys
from shopping.content import common
def print_productstatus(status):
"""Prints a representation of the given product status."""
print(json.dumps(status, sort_keys=True, indent=2, separators=(',', ': ')))
def non_mca_workflow(service, config, page_size=50):
"""Performs the methods that can be used on non-MCA accounts.
Args:
service: The service object used to access the Content API.
config: The samples configuration as a Python dictionary.
page_size: The page size to use for calls to list methods.
"""
# Just used to shorten later calls to the Productstatuses service
ps = service.productstatuses()
merchant_id = config['merchantId']
count = 0
print('Printing status of all products:')
request = ps.list(merchantId=merchant_id, maxResults=page_size)
while request is not None:
result = request.execute()
statuses = result.get('resources')
if not statuses:
print('No product statuses returned.')
break
count += len(statuses)
for status in statuses:
print_productstatus(status)
request = ps.list_next(request, result)
  print('Status for %d products printed.' % count)
def workflow(service, config):
"""Calls all possible Productstatuses methods on the configured account.
Args:
service: The service object used to access the Content API.
config: The samples configuration as a Python dictionary.
"""
print('Performing the Productstatuses workflow.')
print()
if common.is_mca(config):
print('Nothing to do, as MCAs contain no products.\n')
else:
non_mca_workflow(service, config)
print('Done with Productstatuses workflow.')
def main(argv):
# Authenticate and construct service.
service, config, _ = common.init(argv, __doc__)
workflow(service, config)
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "95e6af6b682a3d4dd5ef51d849bb3a1e",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 77,
"avg_line_length": 27.575342465753426,
"alnum_prop": 0.7069051167411823,
"repo_name": "googleads/googleads-shopping-samples",
"id": "dbbd77dba72ac9c9ad680ab02161bad8a7e8c2bc",
"size": "2630",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/shopping/content/productstatuses/workflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "109971"
},
{
"name": "Go",
"bytes": "62736"
},
{
"name": "Java",
"bytes": "147661"
},
{
"name": "PHP",
"bytes": "83914"
},
{
"name": "Python",
"bytes": "132074"
},
{
"name": "Ruby",
"bytes": "123581"
}
],
"symlink_target": ""
} |
from pprint import pprint

from fo2.connections import db_cursor_so

from base.forms.forms2 import Forms2
from base.views import O2BaseGetPostView

from lotes.queries.analise.produzir_grade_empenho import mount_produzir_grade_empenho


__all__ = ['ProduzirGradeEmpenho']


class ProduzirGradeEmpenho(O2BaseGetPostView):

    def __init__(self, *args, **kwargs):
        super(ProduzirGradeEmpenho, self).__init__(*args, **kwargs)
        self.Form_class = Forms2().Modelo
        self.template_name = 'lotes/analise/produzir_grade_empenho.html'
        self.title_name = 'A produzir, por grade, empenho e carteira'
        self.get_args = ['modelo']

    def mount_context(self):
        cursor = db_cursor_so(self.request)
        modelo = self.form.cleaned_data['modelo']
        dados_produzir = mount_produzir_grade_empenho(cursor, modelo)
        self.context.update(dados_produzir)
| {
"content_hash": "c99845d7eeabe28d4bac0a3db383efc4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 85,
"avg_line_length": 34.11538461538461,
"alnum_prop": 0.7023675310033822,
"repo_name": "anselmobd/fo2",
"id": "9a77b57142f80fcd74086b83c562849caf1f92a0",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lotes/views/analise/produzir_grade_empenho.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status

from pythonweb.services.userservice import UserService


class UserController(APIView):

    def get(self, request, userid, format=None):
        user = self.get_user(userid)
        return Response(user.serialize(), status=status.HTTP_200_OK)

    def get_user(self, userid):
        userService = UserService()
        return userService.get_user(userid)
| {
"content_hash": "6b9dab0e8ea90d02bc0d3ab5050ab79f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 68,
"avg_line_length": 29.875,
"alnum_prop": 0.7364016736401674,
"repo_name": "trwalker/python_poc",
"id": "a7b766764adc06ae1369a324d3fe68732eda840f",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonweb/controllers/usercontroller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4112"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('tvshow', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='episode',
            name='siteRating',
        ),
    ]
| {
"content_hash": "cb5e24f912ab0f60a8c5b24d46a66536",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 39,
"avg_line_length": 18.41176470588235,
"alnum_prop": 0.5814696485623003,
"repo_name": "guptachetan1997/Episodes",
"id": "df8e171d735b20508922ffcf983c2b0e5e022bac",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tvshow/migrations/0002_remove_episode_siterating.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15723"
},
{
"name": "Python",
"bytes": "34585"
}
],
"symlink_target": ""
} |
from google.cloud.devtools import cloudbuild_v1


async def sample_cancel_build():
    # Create a client
    client = cloudbuild_v1.CloudBuildAsyncClient()

    # Initialize request argument(s)
    request = cloudbuild_v1.CancelBuildRequest(
        project_id="project_id_value",
        id="id_value",
    )

    # Make the request
    response = await client.cancel_build(request=request)

    # Handle the response
    print(response)

# [END cloudbuild_v1_generated_CloudBuild_CancelBuild_async]
| {
"content_hash": "3ea8044234d27f4476338b96b2b2a4da",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 60,
"avg_line_length": 25.05,
"alnum_prop": 0.7005988023952096,
"repo_name": "googleapis/python-cloudbuild",
"id": "cae2c312aa9630dfbdc715e49d76863103872ca8",
"size": "1881",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/cloudbuild_v1_generated_cloud_build_cancel_build_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "729451"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
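The generated snippet above defines the sample_cancel_build() coroutine but never awaits it. A minimal driver, assuming Application Default Credentials are configured and that a build with the placeholder project and build IDs actually exists, is sketched below; it simply restates the request and adds an asyncio entry point:

import asyncio

from google.cloud.devtools import cloudbuild_v1


async def sample_cancel_build():
    client = cloudbuild_v1.CloudBuildAsyncClient()
    request = cloudbuild_v1.CancelBuildRequest(
        project_id="project_id_value",
        id="id_value",
    )
    # cancel_build returns the cancelled Build resource.
    print(await client.cancel_build(request=request))


if __name__ == "__main__":
    asyncio.run(sample_cancel_build())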
"""
Created on 1 Oct 2016
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
DESCRIPTION
The pt1000_calib utility is used to determine and save the voltage offset for each Pt1000 sensor.
The utility operates by measuring the temperature using a Sensirion SHT sensor, measuring the voltage output of the
Pt1000 sensor, and back-calculating the voltage offset.
For the utility to operate, the I2C address of the Pt1000 ADC must be set. This is done using the dfe_conf utility.
Note that the scs_analysis/gases_sampler process must be restarted for changes to take effect.
SYNOPSIS
pt1000_calib.py [{ -s | -d }] [-v]
EXAMPLES
./pt1000_calib.py -s
DOCUMENT EXAMPLE
{"calibrated-on": "2017-07-19T13:56:48.289+00:00", "v20": 0.002891}
FILES
~/SCS/conf/pt1000_calib.json
SEE ALSO
scs_dev/gases_sampler
scs_mfr/interface_conf
"""
import sys
from scs_core.data.json import JSONify
from scs_core.gas.afe.pt1000_calib import Pt1000Calib
from scs_dfe.interface.interface_conf import InterfaceConf
from scs_dfe.climate.sht_conf import SHTConf
from scs_host.bus.i2c import I2C
from scs_host.sys.host import Host
from scs_mfr.cmd.cmd_pt1000_calib import CmdPt1000Calib
# --------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
try:
I2C.Sensors.open()
# ------------------------------------------------------------------------------------------------------------
# cmd...
cmd = CmdPt1000Calib()
if not cmd.is_valid():
cmd.print_help(sys.stderr)
exit(2)
if cmd.verbose:
print("pt1000_calib: %s" % cmd, file=sys.stderr)
sys.stderr.flush()
# ------------------------------------------------------------------------------------------------------------
# resources...
# Interface...
interface_conf = InterfaceConf.load(Host)
if interface_conf is None:
print("pt1000_calib: InterfaceConf not available.", file=sys.stderr)
exit(1)
interface = interface_conf.interface()
if interface is None:
print("pt1000_calib: Interface not available.", file=sys.stderr)
exit(1)
if cmd.verbose and interface:
print("pt1000_calib: %s" % interface, file=sys.stderr)
# SHT...
sht_conf = SHTConf.load(Host)
sht = sht_conf.int_sht()
# validate...
if interface.pt1000 is None:
print("pt1000_calib: a Pt1000 ADC has not been configured for this system.", file=sys.stderr)
exit(1)
afe = interface.gas_sensors(Host)
# ------------------------------------------------------------------------------------------------------------
# run...
if cmd.set:
# SHT...
sht_datum = sht.sample()
if cmd.verbose:
print(sht_datum, file=sys.stderr)
# Pt1000 initial...
pt1000_datum = afe.sample_pt1000()
# Pt1000 correction...
v20 = pt1000_datum.v20(sht_datum.temp)
pt1000_calib = Pt1000Calib(None, v20)
pt1000_calib.save(Host)
elif cmd.delete:
Pt1000Calib.delete(Host)
pt1000_calib = None
else:
# load...
pt1000_calib = Pt1000Calib.load(Host)
# report...
if pt1000_calib:
print(JSONify.dumps(pt1000_calib))
if cmd.verbose:
afe = interface.gas_sensors(Host)
pt1000_datum = afe.sample_pt1000()
print(pt1000_datum, file=sys.stderr)
# ----------------------------------------------------------------------------------------------------------------
# end...
finally:
I2C.Sensors.close()
| {
"content_hash": "3d1f8edb513ee671c0ebb18cf3abc4a0",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 118,
"avg_line_length": 27.218309859154928,
"alnum_prop": 0.5151358344113842,
"repo_name": "south-coast-science/scs_mfr",
"id": "f827a9772c86de3042f9c59e8668a8f02e3dd566",
"size": "3889",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/scs_mfr/pt1000_calib.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "360578"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
} |
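The pt1000_calib DESCRIPTION above says the v20 offset is back-calculated from an SHT temperature reading and the measured Pt1000 voltage; the actual arithmetic lives in scs_core (Pt1000Datum.v20()). As a rough, purely hypothetical illustration of that kind of correction, assuming an ideal linear Pt1000 response around 20 °C and a constant sense current — the constants and the sample values are not taken from the SCS libraries:

# Hypothetical illustration only -- not the scs_core implementation.
PT1000_R0 = 1000.0       # nominal Pt1000 resistance at 0 degC (ohms)
PT1000_ALPHA = 0.00385   # nominal temperature coefficient (per degC)


def estimate_v20(measured_v, measured_temp_c):
    """Scale a measured Pt1000 voltage to the value expected at 20 degC."""
    r_now = PT1000_R0 * (1.0 + PT1000_ALPHA * measured_temp_c)
    r_20 = PT1000_R0 * (1.0 + PT1000_ALPHA * 20.0)
    # With a constant sense current, voltage scales in proportion to resistance.
    return measured_v * (r_20 / r_now)


if __name__ == '__main__':
    # e.g. 2.950 mV measured at 23.4 degC scales back to roughly 2.915 mV at 20 degC
    print(round(estimate_v20(0.002950, 23.4), 6))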