repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
jmhsi/justin_tinker | lib/python2.7/site-packages/pip/utils/logging.py | 516 | 3327 |
from __future__ import absolute_import
import contextlib
import logging
import logging.handlers
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.compat import WINDOWS
from pip.utils import ensure_dir
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_log_state = threading.local()
_log_state.indentation = 0
@contextlib.contextmanager
def indent_log(num=2):
"""
A context manager which will cause the log output to be indented for any
log messages emitted inside it.
"""
_log_state.indentation += num
try:
yield
finally:
_log_state.indentation -= num
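# Illustrative sketch (not part of pip): wiring IndentingFormatter (defined
# below) to a handler so that indent_log() nesting shows up in the output.
# The logger name is arbitrary.
def _example_indent_log():
    handler = logging.StreamHandler()
    handler.setFormatter(IndentingFormatter("%(message)s"))
    logger = logging.getLogger("pip.example")
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.info("processing package")
    with indent_log():
        logger.info("indented two spaces")
        with indent_log():
            logger.info("indented four spaces")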
def get_indentation():
return getattr(_log_state, 'indentation', 0)
class IndentingFormatter(logging.Formatter):
def format(self, record):
"""
Calls the standard formatter, but will indent all of the log messages
by our current indentation level.
"""
formatted = logging.Formatter.format(self, record)
formatted = "".join([
(" " * get_indentation()) + line
for line in formatted.splitlines(True)
])
return formatted
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
# Don't build up a list of colors if we don't have colorama
if colorama:
COLORS = [
# This needs to be in order from highest logging level to lowest.
(logging.ERROR, _color_wrap(colorama.Fore.RED)),
(logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
]
else:
COLORS = []
def __init__(self, stream=None):
logging.StreamHandler.__init__(self, stream)
if WINDOWS and colorama:
self.stream = colorama.AnsiToWin32(self.stream)
def should_color(self):
# Don't colorize things if we do not have colorama
if not colorama:
return False
real_stream = (
self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
else self.stream.wrapped
)
# If the stream is a tty we should color it
if hasattr(real_stream, "isatty") and real_stream.isatty():
return True
        # If we have an ANSI terminal we should color it
if os.environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
def format(self, record):
msg = logging.StreamHandler.format(self, record)
if self.should_color():
for level, color in self.COLORS:
if record.levelno >= level:
msg = color(msg)
break
return msg
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
def _open(self):
ensure_dir(os.path.dirname(self.baseFilename))
return logging.handlers.RotatingFileHandler._open(self)
class MaxLevelFilter(logging.Filter):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
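# Illustrative sketch (not part of pip): MaxLevelFilter lets one handler take
# everything below WARNING while a second handler takes the rest.
def _example_split_handlers():
    import sys
    out = logging.StreamHandler(sys.stdout)
    out.addFilter(MaxLevelFilter(logging.WARNING))  # DEBUG and INFO only
    err = logging.StreamHandler(sys.stderr)
    err.setLevel(logging.WARNING)                   # WARNING and above
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(out)
    root.addHandler(err)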
| apache-2.0 | -8,200,614,654,039,108,000 | 24.592308 | 77 | 0.629396 | false |
luzheqi1987/nova-annotation | nova/openstack/common/eventlet_backdoor.py | 4 | 4762 |
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from nova.openstack.common._i18n import _LI
from nova.openstack.common import log as logging
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
        raise EventletBackdoorConfigValueError(port_range,
                                               help_for_backdoor_port, ex)
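# Illustrative examples (not part of the module) of the three accepted
# backdoor_port formats handled above.
def _example_parse_port_range():
    assert _parse_port_range('0') == (0, 0)                 # random port
    assert _parse_port_range('8080') == (8080, 8080)        # fixed port
    assert _parse_port_range('8080:8090') == (8080, 8090)   # first free in range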
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
| apache-2.0 | -3,198,023,656,057,584,000 | 31.841379 | 78 | 0.645317 | false |
MIPS/external-chromium_org | tools/protoc_wrapper/protoc_wrapper.py | 78 | 3783 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A simple wrapper for protoc.
- Adds includes in generated headers.
- Handles building with system protobuf as an option.
"""
import optparse
import os.path
import shutil
import subprocess
import sys
import tempfile
PROTOC_INCLUDE_POINT = '// @@protoc_insertion_point(includes)\n'
def ModifyHeader(header_file, extra_header):
"""Adds |extra_header| to |header_file|. Returns 0 on success.
|extra_header| is the name of the header file to include.
|header_file| is a generated protobuf cpp header.
"""
include_point_found = False
header_contents = []
with open(header_file) as f:
for line in f:
header_contents.append(line)
if line == PROTOC_INCLUDE_POINT:
extra_header_msg = '#include "%s"\n' % extra_header
header_contents.append(extra_header_msg)
        include_point_found = True
if not include_point_found:
return 1
with open(header_file, 'wb') as f:
f.write(''.join(header_contents))
return 0
def RewriteProtoFilesForSystemProtobuf(path):
wrapper_dir = tempfile.mkdtemp()
try:
for filename in os.listdir(path):
if not filename.endswith('.proto'):
continue
with open(os.path.join(path, filename), 'r') as src_file:
with open(os.path.join(wrapper_dir, filename), 'w') as dst_file:
for line in src_file:
# Remove lines that break build with system protobuf.
# We cannot optimize for lite runtime, because system lite runtime
# does not have a Chromium-specific hack to retain unknown fields.
# Similarly, it does not understand corresponding option to control
# the usage of that hack.
if 'LITE_RUNTIME' in line or 'retain_unknown_fields' in line:
continue
dst_file.write(line)
return wrapper_dir
except:
shutil.rmtree(wrapper_dir)
raise
def main(argv):
parser = optparse.OptionParser()
parser.add_option('--include', dest='extra_header',
help='The extra header to include. This must be specified '
'along with --protobuf.')
parser.add_option('--protobuf', dest='generated_header',
help='The c++ protobuf header to add the extra header to. '
'This must be specified along with --include.')
parser.add_option('--proto-in-dir',
help='The directory containing .proto files.')
parser.add_option('--proto-in-file', help='Input file to compile.')
parser.add_option('--use-system-protobuf', type=int, default=0,
help='Option to use system-installed protobuf '
'instead of bundled one.')
(options, args) = parser.parse_args(sys.argv)
if len(args) < 2:
return 1
proto_path = options.proto_in_dir
if options.use_system_protobuf == 1:
proto_path = RewriteProtoFilesForSystemProtobuf(proto_path)
try:
# Run what is hopefully protoc.
protoc_args = args[1:]
protoc_args += ['--proto_path=%s' % proto_path,
os.path.join(proto_path, options.proto_in_file)]
ret = subprocess.call(protoc_args)
if ret != 0:
return ret
finally:
if options.use_system_protobuf == 1:
# Remove temporary directory holding re-written files.
shutil.rmtree(proto_path)
# protoc succeeded, check to see if the generated cpp header needs editing.
if not options.extra_header or not options.generated_header:
return 0
return ModifyHeader(options.generated_header, options.extra_header)
if __name__ == '__main__':
sys.exit(main(sys.argv))
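# Illustrative invocation (the paths and flags after the script name are
# hypothetical); everything following the options is treated as the protoc
# command to run:
#   protoc_wrapper.py --include=base/hack.h --protobuf=gen/foo.pb.h \
#       --proto-in-dir=protos --proto-in-file=foo.proto protoc --cpp_out=gen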
| bsd-3-clause | -5,299,682,730,316,538,000 | 33.390909 | 79 | 0.651071 | false |
drglove/SickRage | lib/sqlalchemy/orm/scoping.py | 79 | 6117 |
# orm/scoping.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import exc as sa_exc
from ..util import ScopedRegistry, ThreadLocalRegistry, warn
from . import class_mapper, exc as orm_exc
from .session import Session
__all__ = ['scoped_session']
class scoped_session(object):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
"""
def __init__(self, session_factory, scopefunc=None):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
def __call__(self, **kw):
"""Return the current :class:`.Session`, creating it
using the session factory if not present.
:param \**kw: Keyword arguments will be passed to the
session factory callable, if an existing :class:`.Session`
is not present. If the :class:`.Session` is present and
keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
scope = kw.pop('scope', False)
if scope is not None:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified.")
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
return sess
else:
return self.session_factory(**kw)
else:
return self.registry()
def remove(self):
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def configure(self, **kwargs):
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn('At least one scoped session is already present. '
' configure() can not affect sessions that have '
'already been created.')
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a :class:`.Query` object
against the class and the current :class:`.Session` when called.
e.g.::
Session = scoped_session(sessionmaker())
class MyClass(object):
query = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name=='foo').all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query(object):
def __get__(s, instance, owner):
try:
mapper = class_mapper(owner)
if mapper:
if query_cls:
# custom query class
return query_cls(mapper, session=self.registry())
else:
# session's configured query class
return self.registry().query(mapper)
except orm_exc.UnmappedClassError:
return None
return query()
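# Illustrative sketch (not part of SQLAlchemy): thread-local scoped_session in
# its most common form; the in-memory SQLite URL is only for the example.
def _example_scoped_session():
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('sqlite://')
    Session = scoped_session(sessionmaker(bind=engine))
    session = Session()    # repeated calls in one thread return the same object
    assert Session() is session
    Session.remove()       # close and discard the session for this scope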
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
def instrument(name):
def do(self, *args, **kwargs):
return getattr(self.registry(), name)(*args, **kwargs)
return do
for meth in Session.public_methods:
setattr(scoped_session, meth, instrument(meth))
def makeprop(name):
def set(self, attr):
setattr(self.registry(), name, attr)
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush', 'info'):
setattr(scoped_session, prop, makeprop(prop))
def clslevel(name):
def do(cls, *args, **kwargs):
return getattr(Session, name)(*args, **kwargs)
return classmethod(do)
for prop in ('close_all', 'object_session', 'identity_key'):
setattr(scoped_session, prop, clslevel(prop))
| gpl-3.0 | -5,613,926,742,186,333,000 | 33.755682 | 84 | 0.594573 | false |
intel-iot-devkit/upm | examples/python/ims.py | 6 | 1785 |
#!/usr/bin/env python
# Author: Noel Eck <noel.eck@intel.com>
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ims
def main():
# Create an instance of the I2C Moisture Sensor
# I2C bus 0, default address = 0x20
ims = pyupm_ims.IMS(0)
print ('I2C moisture sensor example...')
while (1):
try:
print ('Version: %d light: 0x%04x moisture: 0x%04x temp: %3.2f C' \
% (ims.get_version(), ims.get_light(), ims.get_moisture(),
ims.get_temperature()))
time.sleep(1)
except KeyboardInterrupt:
break
if __name__ == '__main__':
main()
| mit | 4,747,580,297,500,689,000 | 39.568182 | 79 | 0.69916 | false |
doug-fish/horizon | openstack_dashboard/test/api_tests/keystone_tests.py | 43 | 4914 |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from keystoneclient.v2_0 import client as keystone_client
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class FakeConnection(object):
pass
class ClientConnectionTests(test.TestCase):
def setUp(self):
super(ClientConnectionTests, self).setUp()
self.mox.StubOutWithMock(keystone_client, "Client")
self.internal_url = api.base.url_for(self.request,
'identity',
endpoint_type='internalURL')
self.admin_url = api.base.url_for(self.request,
'identity',
endpoint_type='adminURL')
self.conn = FakeConnection()
class RoleAPITests(test.APITestCase):
def setUp(self):
super(RoleAPITests, self).setUp()
self.role = self.roles.member
self.roles = self.roles.list()
def test_remove_tenant_user(self):
"""Tests api.keystone.remove_tenant_user
Verifies that remove_tenant_user is called with the right arguments
after iterating the user's roles.
There are no assertions in this test because the checking is handled
by mox in the VerifyAll() call in tearDown().
"""
keystoneclient = self.stub_keystoneclient()
tenant = self.tenants.first()
keystoneclient.roles = self.mox.CreateMockAnything()
keystoneclient.roles.roles_for_user(self.user.id,
tenant.id).AndReturn(self.roles)
for role in self.roles:
keystoneclient.roles.revoke(role.id,
domain=None,
group=None,
project=tenant.id,
user=self.user.id)
self.mox.ReplayAll()
api.keystone.remove_tenant_user(self.request, tenant.id, self.user.id)
def test_get_default_role(self):
keystoneclient = self.stub_keystoneclient()
keystoneclient.roles = self.mox.CreateMockAnything()
keystoneclient.roles.list().AndReturn(self.roles)
self.mox.ReplayAll()
role = api.keystone.get_default_role(self.request)
self.assertEqual(self.role, role)
# Verify that a second call doesn't hit the API again,
# (it would show up in mox as an unexpected method call)
role = api.keystone.get_default_role(self.request)
class ServiceAPITests(test.APITestCase):
def test_service_wrapper(self):
catalog = self.service_catalog
identity_data = api.base.get_service_from_catalog(catalog, "identity")
identity_data['id'] = 1
region = identity_data["endpoints"][0]["region"]
service = api.keystone.Service(identity_data, region)
self.assertEqual(u"identity (native backend)", unicode(service))
self.assertEqual(identity_data["endpoints"][0]["region"],
service.region)
self.assertEqual("http://int.keystone.example.com:5000/v2.0",
service.url)
self.assertEqual("http://public.keystone.example.com:5000/v2.0",
service.public_url)
self.assertEqual("int.keystone.example.com", service.host)
def test_service_wrapper_service_in_region(self):
catalog = self.service_catalog
compute_data = api.base.get_service_from_catalog(catalog, "compute")
compute_data['id'] = 1
region = compute_data["endpoints"][1]["region"]
service = api.keystone.Service(compute_data, region)
self.assertEqual(u"compute", unicode(service))
self.assertEqual(compute_data["endpoints"][1]["region"],
service.region)
self.assertEqual("http://int.nova2.example.com:8774/v2",
service.url)
self.assertEqual("http://public.nova2.example.com:8774/v2",
service.public_url)
self.assertEqual("int.nova2.example.com", service.host)
| apache-2.0 | 4,757,724,942,885,468,000 | 41.730435 | 78 | 0.618437 | false |
malept/gunicorn | gunicorn/debug.py | 155 | 2303 |
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
"""The debug module contains utilities and functions for better
debugging Gunicorn."""
import sys
import linecache
import re
import inspect
__all__ = ['spew', 'unspew']
_token_splitter = re.compile(r'\W+')
class Spew(object):
"""
"""
def __init__(self, trace_names=None, show_values=True):
self.trace_names = trace_names
self.show_values = show_values
def __call__(self, frame, event, arg):
if event == 'line':
lineno = frame.f_lineno
if '__file__' in frame.f_globals:
filename = frame.f_globals['__file__']
if (filename.endswith('.pyc') or
filename.endswith('.pyo')):
filename = filename[:-1]
name = frame.f_globals['__name__']
line = linecache.getline(filename, lineno)
else:
name = '[unknown]'
try:
                # getsourcelines() returns a (lines, starting_lineno) pair
                lines, start = inspect.getsourcelines(frame)
                line = lines[lineno - start]
            except (IOError, IndexError):
line = 'Unknown code named [%s]. VM instruction #%d' % (
frame.f_code.co_name, frame.f_lasti)
if self.trace_names is None or name in self.trace_names:
print('%s:%s: %s' % (name, lineno, line.rstrip()))
if not self.show_values:
return self
details = []
            tokens = _token_splitter.split(line)
for tok in tokens:
if tok in frame.f_globals:
details.append('%s=%r' % (tok, frame.f_globals[tok]))
if tok in frame.f_locals:
details.append('%s=%r' % (tok, frame.f_locals[tok]))
if details:
print("\t%s" % ' '.join(details))
return self
def spew(trace_names=None, show_values=False):
"""Install a trace hook which writes incredibly detailed logs
about what code is being executed to stdout.
"""
sys.settrace(Spew(trace_names, show_values))
def unspew():
"""Remove the trace hook installed by spew.
"""
sys.settrace(None)
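# Illustrative usage sketch (not part of gunicorn): install the trace hook,
# run some code, then remove it; every executed line is printed to stdout.
def _example_spew():
    spew(show_values=False)
    total = 0
    for i in range(3):
        total += i
    unspew()
    return total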
| mit | 6,042,150,760,623,615,000 | 31.9 | 77 | 0.518888 | false |
exploreodoo/datStruct | odoo/addons/website/tests/test_views.py | 29 | 8991 |
# -*- coding: utf-8 -*-
import itertools
import unittest2
from lxml import etree as ET, html
from lxml.html import builder as h
from openerp.tests import common
def attrs(**kwargs):
return dict(('data-oe-%s' % key, str(value)) for key, value in kwargs.iteritems())
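# Illustrative example (not part of the test suite): attrs() builds the
# data-oe-* attribute dict used to mark editable fields in rendered views.
def _example_attrs():
    assert attrs(model='res.company', id=1) == {
        'data-oe-model': 'res.company',
        'data-oe-id': '1',
    }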
class TestViewSaving(common.TransactionCase):
def eq(self, a, b):
self.assertEqual(a.tag, b.tag)
self.assertEqual(a.attrib, b.attrib)
self.assertEqual((a.text or '').strip(), (b.text or '').strip())
self.assertEqual((a.tail or '').strip(), (b.tail or '').strip())
for ca, cb in itertools.izip_longest(a, b):
self.eq(ca, cb)
def setUp(self):
super(TestViewSaving, self).setUp()
self.arch = h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
)
self.view_id = self.registry('ir.ui.view').create(self.cr, self.uid, {
'name': "Test View",
'type': 'qweb',
'arch': ET.tostring(self.arch, encoding='utf-8').decode('utf-8')
})
def test_embedded_extraction(self):
fields = self.registry('ir.ui.view').extract_embedded_fields(
self.cr, self.uid, self.arch, context=None)
expect = [
h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char')),
h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')),
]
for actual, expected in itertools.izip_longest(fields, expect):
self.eq(actual, expected)
def test_embedded_save(self):
embedded = h.SPAN("+00 00 000 00 0 000", attrs(
model='res.company', id=1, field='phone', type='char'))
self.registry('ir.ui.view').save_embedded_field(self.cr, self.uid, embedded)
company = self.registry('res.company').browse(self.cr, self.uid, 1)
self.assertEqual(company.phone, "+00 00 000 00 0 000")
@unittest2.skip("save conflict for embedded (saved by third party or previous version in page) not implemented")
def test_embedded_conflict(self):
e1 = h.SPAN("My Company", attrs(model='res.company', id=1, field='name'))
e2 = h.SPAN("Leeroy Jenkins", attrs(model='res.company', id=1, field='name'))
View = self.registry('ir.ui.view')
View.save_embedded_field(self.cr, self.uid, e1)
# FIXME: more precise exception
with self.assertRaises(Exception):
View.save_embedded_field(self.cr, self.uid, e2)
def test_embedded_to_field_ref(self):
View = self.registry('ir.ui.view')
embedded = h.SPAN("My Company", attrs(expression="bob"))
self.eq(
View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob'})
)
def test_to_field_ref_keep_attributes(self):
View = self.registry('ir.ui.view')
att = attrs(expression="bob", model="res.company", id=1, field="name")
att['id'] = "whop"
att['class'] = "foo bar"
embedded = h.SPAN("My Company", att)
self.eq(View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob', 'class': 'foo bar', 'id': 'whop'}))
def test_replace_arch(self):
replacement = h.P("Wheee")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, h.DIV("Wheee"))
def test_replace_arch_2(self):
replacement = h.DIV(h.P("Wheee"))
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, replacement)
def test_fixup_arch(self):
replacement = h.H1("I am the greatest title alive!")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div[1]/h3',
replacement)
self.eq(result, h.DIV(
h.DIV(
h.H3("I am the greatest title alive!"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
))
def test_multiple_xpath_matches(self):
with self.assertRaises(ValueError):
self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div/h3',
h.H6("Lol nope"))
def test_save(self):
Company = self.registry('res.company')
View = self.registry('ir.ui.view')
replacement = ET.tostring(h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN("Acme Corporation", attrs(model='res.company', id=1, field='name', expression="bob", type='char'))),
h.LI(h.SPAN("+12 3456789", attrs(model='res.company', id=1, field='phone', expression="edmund", type='char'))),
)
), encoding='utf-8')
View.save(self.cr, self.uid, res_id=self.view_id, value=replacement,
xpath='/div/div[2]')
company = Company.browse(self.cr, self.uid, 1)
self.assertEqual(company.name, "Acme Corporation")
self.assertEqual(company.phone, "+12 3456789")
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN({'t-field': "bob"})),
h.LI(h.SPAN({'t-field': "edmund"}))
))
)
)
def test_save_escaped_text(self):
view_id = self.registry('ir.ui.view').create(self.cr, self.uid, {
'arch':'<t>hello world</t>',
'type':'qweb'
})
view = self.registry('ir.ui.view').browse(self.cr, self.uid, view_id)
        replacement = 'hello world &amp; &lt;angle brackets&gt;!'
view.save(replacement, xpath='/t')
self.assertEqual(view.render(), replacement, 'html special characters wrongly escaped')
def test_save_only_embedded(self):
Company = self.registry('res.company')
company_id = 1
Company.write(self.cr, self.uid, company_id, {'name': "Foo Corporation"})
node = html.tostring(h.SPAN(
"Acme Corporation",
attrs(model='res.company', id=company_id, field="name", expression='bob', type='char')))
self.registry('ir.ui.view').save(self.cr, self.uid, res_id=company_id,value=node)
company = Company.browse(self.cr, self.uid, company_id)
self.assertEqual(company.name, "Acme Corporation")
def test_field_tail(self):
View = self.registry('ir.ui.view')
replacement = ET.tostring(
h.LI(h.SPAN("+12 3456789", attrs(
model='res.company', id=1, type='char',
field='phone', expression="edmund")),
"whop whop"
), encoding="utf-8")
View.save(self.cr, self.uid, res_id = self.view_id, value=replacement,
xpath='/div/div[2]/ul/li[3]')
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN({'t-field': "edmund"}), "whop whop"),
))
)
)
| gpl-2.0 | -6,371,149,402,694,606,000 | 38.091304 | 128 | 0.509176 | false |
anbasile/flask_sample | flask/lib/python2.7/site-packages/werkzeug/formparser.py | 162 | 21207 |
# -*- coding: utf-8 -*-
"""
werkzeug.formparser
~~~~~~~~~~~~~~~~~~~
This module implements the form parsing. It supports url-encoded forms
as well as non-nested multipart uploads.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
from io import BytesIO
from tempfile import TemporaryFile
from itertools import chain, repeat, tee
from functools import update_wrapper
from werkzeug._compat import to_native, text_type
from werkzeug.urls import url_decode_stream
from werkzeug.wsgi import make_line_iter, \
get_input_stream, get_content_length
from werkzeug.datastructures import Headers, FileStorage, MultiDict
from werkzeug.http import parse_options_header
#: an iterator that yields empty strings
_empty_string_iter = repeat('')
#: a regular expression for multipart boundaries
_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
#: HTTP transfer encodings that we support for multipart messages and that
#: are also available in Python.
_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
def default_stream_factory(total_content_length, filename, content_type,
content_length=None):
"""The stream factory that is used per default."""
if total_content_length > 1024 * 500:
return TemporaryFile('wb+')
return BytesIO()
def parse_form_data(environ, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
"""Parse the form data in the environ and return it as tuple in the form
``(stream, form, files)``. You should only call this method if the
transport method is `POST`, `PUT`, or `PATCH`.
If the mimetype of the data transmitted is `multipart/form-data` the
files multidict will be filled with `FileStorage` objects. If the
mimetype is unknown the input stream is wrapped and returned as first
argument, else the stream is empty.
This is a shortcut for the common usage of :class:`FormDataParser`.
Have a look at :ref:`dealing-with-request-data` for more details.
.. versionadded:: 0.5
The `max_form_memory_size`, `max_content_length` and
`cls` parameters were added.
.. versionadded:: 0.5.1
The optional `silent` flag was added.
:param environ: the WSGI environment to be used for parsing.
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
:return: A tuple in the form ``(stream, form, files)``.
"""
return FormDataParser(stream_factory, charset, errors,
max_form_memory_size, max_content_length,
cls, silent).parse_from_environ(environ)
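# Illustrative sketch (not part of werkzeug): typical use inside a WSGI
# application; the field names 'name' and 'attachment' are hypothetical.
def _example_parse_form_data(environ):
    stream, form, files = parse_form_data(environ)
    name = form.get('name')            # url-encoded or multipart form field
    upload = files.get('attachment')   # FileStorage for multipart uploads
    return name, upload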
def exhaust_stream(f):
"""Helper decorator for methods that exhausts the stream on return."""
def wrapper(self, stream, *args, **kwargs):
try:
return f(self, stream, *args, **kwargs)
finally:
exhaust = getattr(stream, 'exhaust', None)
if exhaust is not None:
exhaust()
else:
while 1:
chunk = stream.read(1024 * 64)
if not chunk:
break
return update_wrapper(wrapper, f)
class FormDataParser(object):
"""This class implements parsing of form data for Werkzeug. By itself
it can parse multipart and url encoded form data. It can be subclassed
and extended but for most mimetypes it is a better idea to use the
untouched stream and expose it as separate attributes on a request
object.
.. versionadded:: 0.8
:param stream_factory: An optional callable that returns a new read and
writeable file descriptor. This callable works
the same as :meth:`~BaseResponse._get_file_stream`.
:param charset: The character set for URL and url encoded form data.
:param errors: The encoding error behavior.
:param max_form_memory_size: the maximum number of bytes to be accepted for
in-memory stored form data. If the data
exceeds the value specified an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param max_content_length: If this is provided and the transmitted data
is longer than this value an
:exc:`~exceptions.RequestEntityTooLarge`
exception is raised.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`MultiDict` is used.
:param silent: If set to False parsing errors will not be caught.
"""
def __init__(self, stream_factory=None, charset='utf-8',
errors='replace', max_form_memory_size=None,
max_content_length=None, cls=None,
silent=True):
if stream_factory is None:
stream_factory = default_stream_factory
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
self.max_content_length = max_content_length
if cls is None:
cls = MultiDict
self.cls = cls
self.silent = silent
def get_parse_func(self, mimetype, options):
return self.parse_functions.get(mimetype)
def parse_from_environ(self, environ):
"""Parses the information from the environment as form data.
:param environ: the WSGI environment to be used for parsing.
:return: A tuple in the form ``(stream, form, files)``.
"""
content_type = environ.get('CONTENT_TYPE', '')
content_length = get_content_length(environ)
mimetype, options = parse_options_header(content_type)
return self.parse(get_input_stream(environ), mimetype,
content_length, options)
def parse(self, stream, mimetype, content_length, options=None):
"""Parses the information from the given stream, mimetype,
content length and mimetype parameters.
:param stream: an input stream
:param mimetype: the mimetype of the data
:param content_length: the content length of the incoming data
:param options: optional mimetype parameters (used for
the multipart boundary for instance)
:return: A tuple in the form ``(stream, form, files)``.
"""
if self.max_content_length is not None and \
content_length is not None and \
content_length > self.max_content_length:
raise exceptions.RequestEntityTooLarge()
if options is None:
options = {}
parse_func = self.get_parse_func(mimetype, options)
if parse_func is not None:
try:
return parse_func(self, stream, mimetype,
content_length, options)
except ValueError:
if not self.silent:
raise
return stream, self.cls(), self.cls()
@exhaust_stream
def _parse_multipart(self, stream, mimetype, content_length, options):
parser = MultiPartParser(self.stream_factory, self.charset, self.errors,
max_form_memory_size=self.max_form_memory_size,
cls=self.cls)
boundary = options.get('boundary')
if boundary is None:
raise ValueError('Missing boundary')
if isinstance(boundary, text_type):
boundary = boundary.encode('ascii')
form, files = parser.parse(stream, boundary, content_length)
return stream, form, files
@exhaust_stream
def _parse_urlencoded(self, stream, mimetype, content_length, options):
if self.max_form_memory_size is not None and \
content_length is not None and \
content_length > self.max_form_memory_size:
raise exceptions.RequestEntityTooLarge()
form = url_decode_stream(stream, self.charset,
errors=self.errors, cls=self.cls)
return stream, form, self.cls()
#: mapping of mimetypes to parsing functions
parse_functions = {
'multipart/form-data': _parse_multipart,
'application/x-www-form-urlencoded': _parse_urlencoded,
'application/x-url-encoded': _parse_urlencoded
}
def is_valid_multipart_boundary(boundary):
"""Checks if the string given is a valid multipart boundary."""
return _multipart_boundary_re.match(boundary) is not None
def _line_parse(line):
"""Removes line ending characters and returns a tuple (`stripped_line`,
`is_terminated`).
"""
if line[-2:] in ['\r\n', b'\r\n']:
return line[:-2], True
elif line[-1:] in ['\r', '\n', b'\r', b'\n']:
return line[:-1], True
return line, False
def parse_multipart_headers(iterable):
"""Parses multipart headers from an iterable that yields lines (including
the trailing newline symbol). The iterable has to be newline terminated.
The iterable will stop at the line where the headers ended so it can be
further consumed.
:param iterable: iterable of strings that are newline terminated
"""
result = []
for line in iterable:
line = to_native(line)
line, line_terminated = _line_parse(line)
if not line_terminated:
raise ValueError('unexpected end of line in multipart header')
if not line:
break
elif line[0] in ' \t' and result:
key, value = result[-1]
result[-1] = (key, value + '\n ' + line[1:])
else:
parts = line.split(':', 1)
if len(parts) == 2:
result.append((parts[0].strip(), parts[1].strip()))
# we link the list to the headers, no need to create a copy, the
# list was not shared anyways.
return Headers(result)
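# Illustrative example (not part of werkzeug): the iterable must yield
# newline-terminated lines; a bare newline ends the header block.
def _example_parse_multipart_headers():
    lines = iter(['Content-Type: text/plain\r\n', '\r\n'])
    headers = parse_multipart_headers(lines)
    assert headers['Content-Type'] == 'text/plain'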
_begin_form = 'begin_form'
_begin_file = 'begin_file'
_cont = 'cont'
_end = 'end'
class MultiPartParser(object):
def __init__(self, stream_factory=None, charset='utf-8', errors='replace',
max_form_memory_size=None, cls=None, buffer_size=64 * 1024):
self.stream_factory = stream_factory
self.charset = charset
self.errors = errors
self.max_form_memory_size = max_form_memory_size
if stream_factory is None:
stream_factory = default_stream_factory
if cls is None:
cls = MultiDict
self.cls = cls
# make sure the buffer size is divisible by four so that we can base64
# decode chunk by chunk
assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
# also the buffer size has to be at least 1024 bytes long or long headers
# will freak out the system
assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
self.buffer_size = buffer_size
def _fix_ie_filename(self, filename):
"""Internet Explorer 6 transmits the full file name if a file is
uploaded. This function strips the full path if it thinks the
filename is Windows-like absolute.
"""
if filename[1:3] == ':\\' or filename[:2] == '\\\\':
return filename.split('\\')[-1]
return filename
def _find_terminator(self, iterator):
"""The terminator might have some additional newlines before it.
There is at least one application that sends additional newlines
before headers (the python setuptools package).
"""
for line in iterator:
if not line:
break
line = line.strip()
if line:
return line
return b''
def fail(self, message):
raise ValueError(message)
def get_part_encoding(self, headers):
transfer_encoding = headers.get('content-transfer-encoding')
if transfer_encoding is not None and \
transfer_encoding in _supported_multipart_encodings:
return transfer_encoding
def get_part_charset(self, headers):
# Figure out input charset for current part
content_type = headers.get('content-type')
if content_type:
mimetype, ct_params = parse_options_header(content_type)
return ct_params.get('charset', self.charset)
return self.charset
def start_file_streaming(self, filename, headers, total_content_length):
if isinstance(filename, bytes):
filename = filename.decode(self.charset, self.errors)
filename = self._fix_ie_filename(filename)
content_type = headers.get('content-type')
try:
content_length = int(headers['content-length'])
except (KeyError, ValueError):
content_length = 0
        # pass arguments in the order declared by default_stream_factory
        container = self.stream_factory(total_content_length, filename,
                                        content_type, content_length)
return filename, container
def in_memory_threshold_reached(self, bytes):
raise exceptions.RequestEntityTooLarge()
def validate_boundary(self, boundary):
if not boundary:
self.fail('Missing boundary')
if not is_valid_multipart_boundary(boundary):
self.fail('Invalid boundary: %s' % boundary)
if len(boundary) > self.buffer_size: # pragma: no cover
# this should never happen because we check for a minimum size
# of 1024 and boundaries may not be longer than 200. The only
# situation when this happens is for non debug builds where
# the assert is skipped.
self.fail('Boundary longer than buffer size')
def parse_lines(self, file, boundary, content_length):
"""Generate parts of
``('begin_form', (headers, name))``
``('begin_file', (headers, name, filename))``
``('cont', bytestring)``
``('end', None)``
Always obeys the grammar
parts = ( begin_form cont* end |
begin_file cont* end )*
"""
next_part = b'--' + boundary
last_part = next_part + b'--'
iterator = chain(make_line_iter(file, limit=content_length,
buffer_size=self.buffer_size),
_empty_string_iter)
terminator = self._find_terminator(iterator)
if terminator == last_part:
return
elif terminator != next_part:
self.fail('Expected boundary at start of multipart data')
while terminator != last_part:
headers = parse_multipart_headers(iterator)
disposition = headers.get('content-disposition')
if disposition is None:
self.fail('Missing Content-Disposition header')
disposition, extra = parse_options_header(disposition)
transfer_encoding = self.get_part_encoding(headers)
name = extra.get('name')
filename = extra.get('filename')
# if no content type is given we stream into memory. A list is
# used as a temporary container.
if filename is None:
yield _begin_form, (headers, name)
# otherwise we parse the rest of the headers and ask the stream
# factory for something we can write in.
else:
yield _begin_file, (headers, name, filename)
buf = b''
for line in iterator:
if not line:
self.fail('unexpected end of stream')
if line[:2] == b'--':
terminator = line.rstrip()
if terminator in (next_part, last_part):
break
if transfer_encoding is not None:
if transfer_encoding == 'base64':
transfer_encoding = 'base64_codec'
try:
line = codecs.decode(line, transfer_encoding)
except Exception:
self.fail('could not decode transfer encoded chunk')
# we have something in the buffer from the last iteration.
# this is usually a newline delimiter.
if buf:
yield _cont, buf
buf = b''
# If the line ends with windows CRLF we write everything except
# the last two bytes. In all other cases however we write
# everything except the last byte. If it was a newline, that's
# fine, otherwise it does not matter because we will write it
# the next iteration. this ensures we do not write the
# final newline into the stream. That way we do not have to
# truncate the stream. However we do have to make sure that
# if something else than a newline is in there we write it
# out.
if line[-2:] == b'\r\n':
buf = b'\r\n'
cutoff = -2
else:
buf = line[-1:]
cutoff = -1
yield _cont, line[:cutoff]
else: # pragma: no cover
raise ValueError('unexpected end of part')
# if we have a leftover in the buffer that is not a newline
# character we have to flush it, otherwise we will chop of
# certain values.
if buf not in (b'', b'\r', b'\n', b'\r\n'):
yield _cont, buf
yield _end, None
def parse_parts(self, file, boundary, content_length):
"""Generate ``('file', (name, val))`` and
``('form', (name, val))`` parts.
"""
in_memory = 0
for ellt, ell in self.parse_lines(file, boundary, content_length):
if ellt == _begin_file:
headers, name, filename = ell
is_file = True
guard_memory = False
filename, container = self.start_file_streaming(
filename, headers, content_length)
_write = container.write
elif ellt == _begin_form:
headers, name = ell
is_file = False
container = []
_write = container.append
guard_memory = self.max_form_memory_size is not None
elif ellt == _cont:
_write(ell)
# if we write into memory and there is a memory size limit we
# count the number of bytes in memory and raise an exception if
# there is too much data in memory.
if guard_memory:
in_memory += len(ell)
if in_memory > self.max_form_memory_size:
self.in_memory_threshold_reached(in_memory)
elif ellt == _end:
if is_file:
container.seek(0)
yield ('file',
(name, FileStorage(container, filename, name,
headers=headers)))
else:
part_charset = self.get_part_charset(headers)
yield ('form',
(name, b''.join(container).decode(
part_charset, self.errors)))
def parse(self, file, boundary, content_length):
formstream, filestream = tee(
self.parse_parts(file, boundary, content_length), 2)
form = (p[1] for p in formstream if p[0] == 'form')
files = (p[1] for p in filestream if p[0] == 'file')
return self.cls(form), self.cls(files)
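# Illustrative sketch (not part of werkzeug): parsing a minimal multipart
# body held in memory; the boundary string is arbitrary.
def _example_multipart_parse():
    body = (b'--frontier\r\n'
            b'Content-Disposition: form-data; name="field"\r\n'
            b'\r\n'
            b'value\r\n'
            b'--frontier--\r\n')
    parser = MultiPartParser()
    form, files = parser.parse(BytesIO(body), b'frontier', len(body))
    assert form['field'] == u'value'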
from werkzeug import exceptions
| mit | -7,921,150,066,936,152,000 | 39.394286 | 81 | 0.578017 | false |
lliendo/Radar | radar/class_loader/__init__.py | 1 | 2634 |
# -*- coding: utf-8 -*-
"""
This file is part of Radar.
Radar is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Radar is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Lesser GNU General Public License for more details.
You should have received a copy of the Lesser GNU General Public License
along with Radar. If not, see <http://www.gnu.org/licenses/>.
Copyright 2015 Lucas Liendo.
"""
from io import open
from os.path import join as join_path
from ast import parse as ast_parse
from ast import walk as ast_walk
from ast import ClassDef
from pkgutil import iter_modules
from sys import path as module_search_path
from ..logger import RadarLogger
class ClassLoaderError(Exception):
pass
class ClassLoader(object):
"""
This class offers a simple mechanism to get all user-defined
classes from an external module. This is useful if you want
to load unknown classes dynamically at run-time.
"""
ENCODING_DECLARATION = '# -*- coding: utf-8 -*-'
def __init__(self, module_path):
self._module_path = module_path
module_search_path.append(module_path)
def _get_class_names(self, filename):
class_names = []
try:
            with open(filename) as fd:
                source = fd.read()
            # ast_parse() rejects a unicode string that still carries an
            # encoding declaration, so drop that prefix if present. Note that
            # str.strip() would strip *characters*, not the prefix string, so
            # the declaration has to be matched explicitly.
            if source.startswith(self.ENCODING_DECLARATION):
                source = source[len(self.ENCODING_DECLARATION):]
            parsed_source = ast_parse(source)
            class_names = [n.name for n in ast_walk(parsed_source) if isinstance(n, ClassDef)]
except IOError as e:
raise ClassLoaderError('Error - Couldn\'t open : \'{:}\'. Reason : {:}.'.format(filename, e.strerror))
except SyntaxError as e:
raise ClassLoaderError('Error - Couldn\'t parse \'{:}\'. Reason: {:}.'.format(filename, e))
return class_names
def get_classes(self, subclass=object):
classes = []
for _, module_name, _ in iter_modules(path=[self._module_path]):
module_path = join_path(self._module_path, module_name)
try:
class_names = self._get_class_names(module_path + '/__init__.py')
imported_module = __import__(module_name)
classes += [getattr(imported_module, class_name) for class_name in class_names]
except ClassLoaderError as e:
RadarLogger.log(e)
return [C for C in classes if issubclass(C, subclass)]
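# Illustrative sketch (the plugin path and base class are hypothetical):
def _example_get_classes():
    loader = ClassLoader('/path/to/plugins')
    # All classes defined in the modules under the path:
    all_classes = loader.get_classes()
    # Or only subclasses of some known base, e.g.:
    #   plugin_classes = loader.get_classes(subclass=BasePlugin)
    return all_classes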
| lgpl-3.0 | 2,113,619,975,999,584,000 | 33.207792 | 114 | 0.66325 | false |
DARKPOP/external_chromium_org | third_party/closure_linter/closure_linter/common/tokens.py | 129 | 4741 |
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent tokens and positions within them."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class TokenType(object):
"""Token types common to all languages."""
NORMAL = 'normal'
WHITESPACE = 'whitespace'
BLANK_LINE = 'blank line'
class Token(object):
"""Token class for intelligent text splitting.
The token class represents a string of characters and an identifying type.
Attributes:
type: The type of token.
string: The characters the token comprises.
length: The length of the token.
line: The text of the line the token is found in.
line_number: The number of the line the token is found in.
values: Dictionary of values returned from the tokens regex match.
previous: The token before this one.
next: The token after this one.
start_index: The character index in the line where this token starts.
attached_object: Object containing more information about this token.
metadata: Object containing metadata about this token. Must be added by
a separate metadata pass.
"""
def __init__(self, string, token_type, line, line_number, values=None,
orig_line_number=None):
"""Creates a new Token object.
Args:
string: The string of input the token contains.
token_type: The type of token.
line: The text of the line this token is in.
line_number: The line number of the token.
values: A dict of named values within the token. For instance, a
function declaration may have a value called 'name' which captures the
name of the function.
orig_line_number: The line number of the original file this token comes
from. This should be only set during the tokenization process. For newly
created error fix tokens after that, it should be None.
"""
self.type = token_type
self.string = string
self.length = len(string)
self.line = line
self.line_number = line_number
self.orig_line_number = orig_line_number
self.values = values
self.is_deleted = False
# These parts can only be computed when the file is fully tokenized
self.previous = None
self.next = None
self.start_index = None
# This part is set in statetracker.py
# TODO(robbyw): Wrap this in to metadata
self.attached_object = None
# This part is set in *metadatapass.py
self.metadata = None
def IsFirstInLine(self):
"""Tests if this token is the first token in its line.
Returns:
Whether the token is the first token in its line.
"""
return not self.previous or self.previous.line_number != self.line_number
def IsLastInLine(self):
"""Tests if this token is the last token in its line.
Returns:
Whether the token is the last token in its line.
"""
return not self.next or self.next.line_number != self.line_number
def IsType(self, token_type):
"""Tests if this token is of the given type.
Args:
token_type: The type to test for.
Returns:
True if the type of this token matches the type passed in.
"""
return self.type == token_type
def IsAnyType(self, *token_types):
"""Tests if this token is any of the given types.
Args:
token_types: The types to check. Also accepts a single array.
Returns:
True if the type of this token is any of the types passed in.
"""
if not isinstance(token_types[0], basestring):
return self.type in token_types[0]
else:
return self.type in token_types
def __repr__(self):
return '<Token: %s, "%s", %r, %d, %r>' % (self.type, self.string,
self.values, self.line_number,
self.metadata)
def __iter__(self):
"""Returns a token iterator."""
node = self
while node:
yield node
node = node.next
def __reversed__(self):
"""Returns a reverse-direction token iterator."""
node = self
while node:
yield node
node = node.previous
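# Illustrative example (not part of the linter): linking two tokens and
# walking the doubly-linked list with the iterators defined above.
def _ExampleTokenIteration():
  first = Token('foo', TokenType.NORMAL, 'foo bar', 1)
  second = Token('bar', TokenType.NORMAL, 'foo bar', 1)
  first.next = second
  second.previous = first
  return [token.string for token in first]  # ['foo', 'bar']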
| bsd-3-clause | 4,255,788,805,903,273,500 | 31.696552 | 80 | 0.663784 | false |
timj/scons | test/TEX/clean.py | 3 | 2833 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Check that all auxilary files created by LaTeX are properly cleaned by scons -c.
"""
import os
import TestSCons
test = TestSCons.TestSCons()
latex = test.where_is('latex')
if not latex:
test.skip_test("Could not find tex or latex; skipping test(s).\n")
comment = os.system('kpsewhich comment.sty')
if comment != 0:
test.skip_test("comment.sty not installed; skipping test(s).\n")
# package hyperref generates foo.out
# package comment generates comment.cut
# todo: add makeindex etc.
input_file = r"""
\documentclass{article}
\usepackage{hyperref}
\usepackage{comment}
\specialcomment{foocom}{}{}
\begin{document}
\begin{foocom}
Hi
\end{foocom}
As stated in \cite{X}, this is a bug-a-boo.
\bibliography{fooref}
\bibliographystyle{plain}
\end{document}
"""
bibfile = r"""
@Article{X,
author = "Mr. X",
title = "A determination of bug-a-boo-ness",
journal = "Journal of B.a.B.",
year = 1920,
volume = 62,
pages = 291
}
"""
test.write('SConstruct', """\
import os
env = Environment(tools = ['tex', 'latex'])
env.DVI( "foo.ltx" )
""")
test.write('foo.ltx', input_file)
test.write('fooref.bib', bibfile)
test.run()
test.must_exist('foo.log')
test.must_exist('foo.aux')
test.must_exist('foo.bbl')
test.must_exist('foo.blg')
test.must_exist('comment.cut')
test.must_exist('foo.out')
test.run(arguments = '-c')
test.must_not_exist('foo.log')
test.must_not_exist('foo.aux')
test.must_not_exist('foo.bbl')
test.must_not_exist('foo.blg')
test.must_not_exist('comment.cut')
test.must_not_exist('foo.out')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | 1,548,840,893,365,790,200 | 25.476636 | 80 | 0.716908 | false |
pshen/ansible | lib/ansible/modules/network/iosxr/_iosxr_template.py | 47 | 5327 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: iosxr_template
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage Cisco IOS XR device configurations over SSH
description:
- Manages network device configurations over SSH. This module
allows implementers to work with the device running-config. It
provides a way to push a set of commands onto a network device
by evaluating the current running-config and only pushing configuration
commands that are not already configured. The config source can
be a set of commands or a template.
deprecated: Deprecated in 2.2. Use M(iosxr_config) instead.
extends_documentation_fragment: iosxr
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will first search for the source
file in role or playbook root folder in templates unless a full
path to the file is given.
required: false
default: null
force:
description:
- The force argument instructs the module not to consider the
current device running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
required: false
default: false
choices: [ "true", "false" ]
backup:
description:
- When this argument is configured true, the module will backup
the running-config from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: [ "true", "false" ]
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task. The I(config) argument allows the implementer to
pass in the configuration to use as the base config for
comparison.
required: false
default: null
"""
EXAMPLES = """
- name: push a configuration onto the device
iosxr_template:
src: config.j2
- name: forceable push a configuration onto the device
iosxr_template:
src: config.j2
force: yes
- name: provide the base configuration for comparison
iosxr_template:
src: candidate_config.txt
config: current_config.txt
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import NetworkConfig, dumps
from ansible.module_utils.iosxr import get_config, load_config
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args
def main():
""" main entry point for module execution
"""
argument_spec = dict(
src=dict(),
force=dict(default=False, type='bool'),
backup=dict(default=False, type='bool'),
config=dict(),
)
argument_spec.update(iosxr_argument_spec)
mutually_exclusive = [('config', 'backup'), ('config', 'force')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
candidate = NetworkConfig(contents=module.params['src'], indent=1)
if module.params['backup']:
result['__backup__'] = get_config(module)
if not module.params['force']:
contents = get_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj)
commands = dumps(commands, 'commands').split('\n')
commands = [str(c).strip() for c in commands if c]
else:
commands = [c.strip() for c in str(candidate).split('\n')]
if commands:
load_config(module, commands, result['warnings'], not module.check_mode)
result['changed'] = not module.check_mode
result['updates'] = commands
result['commands'] = commands
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,893,643,827,327,665,000 | 32.71519 | 80 | 0.681246 | false |
wbc2010/django1.2.5 | django1.2.5/django/template/loaders/eggs.py | 65 | 1434 | # Wrapper for loading templates from eggs via pkg_resources.resource_string.
try:
from pkg_resources import resource_string
except ImportError:
resource_string = None
from django.template import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.conf import settings
class Loader(BaseLoader):
is_usable = resource_string is not None
def load_template_source(self, template_name, template_dirs=None):
"""
Loads templates from Python eggs via pkg_resource.resource_string.
For every installed app, it tries to get the resource (app, template_name).
"""
if resource_string is not None:
pkg_name = 'templates/' + template_name
for app in settings.INSTALLED_APPS:
try:
return (resource_string(app, pkg_name).decode(settings.FILE_CHARSET), 'egg:%s:%s' % (app, pkg_name))
except:
pass
raise TemplateDoesNotExist(template_name)
_loader = Loader()
def load_template_source(template_name, template_dirs=None):
import warnings
warnings.warn(
"'django.template.loaders.eggs.load_template_source' is deprecated; use 'django.template.loaders.eggs.Loader' instead.",
PendingDeprecationWarning
)
return _loader.load_template_source(template_name, template_dirs)
load_template_source.is_usable = resource_string is not None
| bsd-3-clause | -5,479,921,764,174,205,000 | 35.769231 | 128 | 0.6841 | false |
sestrella/ansible | test/units/modules/network/check_point/test_cp_mgmt_tag.py | 19 | 3840 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_tag
OBJECT = {
"name": "My New Tag1",
"tags": [
"tag1",
"tag2"
]
}
CREATE_PAYLOAD = {
"name": "My New Tag1",
"tags": [
"tag1",
"tag2"
]
}
UPDATE_PAYLOAD = {
"name": "My New Tag1"
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
DELETE_PAYLOAD = {
"name": "My New Tag1",
"state": "absent"
}
function_path = 'ansible.modules.network.check_point.cp_mgmt_tag.api_call'
api_call_object = 'tag'
class TestCheckpointTag(object):
module = cp_mgmt_tag
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_create(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert result['changed']
assert OBJECT.items() == result[api_call_object].items()
def test_create_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert not result['changed']
def test_update(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert result['changed']
assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()
def test_update_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert not result['changed']
def test_delete(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True}
result = self._run_module(DELETE_PAYLOAD)
assert result['changed']
def test_delete_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False}
result = self._run_module(DELETE_PAYLOAD)
assert not result['changed']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
| gpl-3.0 | 6,399,293,453,544,189,000 | 31.820513 | 109 | 0.677604 | false |
molliewhitacre/aston | aston/qtgui/TableFile.py | 3 | 18270 | # -*- coding: utf-8 -*-
# Copyright 2011-2014 Roderick Bovee
#
# This file is part of Aston.
#
# Aston is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Aston is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Aston. If not, see <http://www.gnu.org/licenses/>.
"""
Model for handling display of open files.
"""
#pylint: disable=C0103
from __future__ import unicode_literals
import json
from collections import OrderedDict
from PyQt4 import QtGui, QtCore
from aston.resources import resfile
from aston.qtgui.Fields import aston_fields, aston_groups
from aston.database.File import Project, Run, Analysis
from aston.qtgui.TableModel import TableModel
class FileTreeModel(TableModel):
"""
Handles interfacing with QTreeView and other file-related duties.
"""
def __init__(self, database=None, tree_view=None, master_window=None, \
*args):
super(FileTreeModel, self).__init__(database, tree_view, \
master_window, *args)
#TODO: load custom fields from the database
self.fields = ['name', 'sel']
# create a list with all of the root items in it
self._children = self.db.query(Project).filter(Project.name != '').all()
prj = self.db.query(Project).filter_by(name='').first()
if prj is None:
self._children = []
else:
q = self.db.query(Run)
self._children += q.filter_by(_project_id=prj._project_id).all()
self.reset()
##set up selections
#tree_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
#tree_view.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
##TODO: this works, but needs to be detached when opening a new folder
#tree_view.selectionModel().currentChanged.connect(self.itemSelected)
##tree_view.clicked.connect(self.itemSelected)
##set up key shortcuts
#delAc = QtGui.QAction("Delete", tree_view, \
# shortcut=QtCore.Qt.Key_Backspace, triggered=self.delItemKey)
#delAc = QtGui.QAction("Delete", tree_view, \
# shortcut=QtCore.Qt.Key_Delete, triggered=self.delItemKey)
#tree_view.addAction(delAc)
##set up right-clicking
#tree_view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
#tree_view.customContextMenuRequested.connect(self.click_main)
#tree_view.header().setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
#tree_view.header().customContextMenuRequested.connect( \
# self.click_head)
#tree_view.header().setStretchLastSection(False)
##set up drag and drop
#tree_view.setDragEnabled(True)
#tree_view.setAcceptDrops(True)
#tree_view.setDragDropMode(QtGui.QAbstractItemView.DragDrop)
#tree_view.dragMoveEvent = self.dragMoveEvent
##keep us aware of column reordering
#self.tree_view.header().sectionMoved.connect(self.colsChanged)
##deal with combo boxs in table
#self.cDelegates = {}
#self.enableComboCols()
#prettify
tree_view.collapseAll()
tree_view.setColumnWidth(0, 300)
tree_view.setColumnWidth(1, 60)
#update_db = self.db.get_key('db_reload_on_open', dflt=True)
#if type(database) == AstonFileDatabase and update_db:
# self.loadthread = LoadFilesThread(self.db)
# self.loadthread.file_updated.connect(self.update_obj)
# self.loadthread.start()
def dragMoveEvent(self, event):
#TODO: files shouldn't be able to be under peaks
#index = self.proxy_mod.mapToSource(self.tree_view.indexAt(event.pos()))
if event.mimeData().hasFormat('application/x-aston-file'):
QtGui.QTreeView.dragMoveEvent(self.tree_view, event)
else:
event.ignore()
def mimeTypes(self):
types = QtCore.QStringList()
types.append('text/plain')
types.append('application/x-aston-file')
return types
def mimeData(self, indexList):
data = QtCore.QMimeData()
objs = [i.internalPointer() for i in indexList \
if i.column() == 0]
data.setText(self.items_as_csv(objs))
id_lst = [str(o.db_id) for o in objs]
data.setData('application/x-aston-file', ','.join(id_lst))
return data
def dropMimeData(self, data, action, row, col, parent):
#TODO: drop files into library?
#TODO: deal with moving objects between tables
# i.e. copy from compounds table into file table
fids = data.data('application/x-aston-file')
if not parent.isValid():
new_parent = self.db
else:
new_parent = parent.internalPointer()
for db_id in [int(i) for i in fids.split(',')]:
obj = self.db.object_from_id(db_id)
if obj is not None:
obj._parent = new_parent
return True
def supportedDropActions(self):
return QtCore.Qt.MoveAction
def data(self, index, role):
fld = self.fields[index.column()]
obj = index.internalPointer()
rslt = None
if type(obj) is Project:
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if fld == 'name':
rslt = obj.name
elif role == QtCore.Qt.DecorationRole and index.column() == 0:
#TODO: icon for projects
pass
elif type(obj) is Run:
if fld == 'sel' and role == QtCore.Qt.CheckStateRole:
if self.master_window.pal_tab.has_run(obj, enabled=True):
rslt = QtCore.Qt.Checked
else:
rslt = QtCore.Qt.Unchecked
elif role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
if fld == 'name':
rslt = obj.name
else:
rslt = obj.info.get(fld, '')
elif role == QtCore.Qt.DecorationRole and index.column() == 0:
loc = resfile('aston/qtgui', 'icons/file.png')
rslt = QtGui.QIcon(loc)
elif type(obj) is Analysis:
if role == QtCore.Qt.DisplayRole:
if fld == 'name':
return obj.name
#elif role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
# if fld == 'p-model' and f.db_type == 'peak':
# rpeakmodels = {peak_models[k]: k for k in peak_models}
# rslt = rpeakmodels.get(f.info[fld], 'None')
# else:
# rslt = f.info[fld]
#elif role == QtCore.Qt.DecorationRole and index.column() == 0:
# #TODO: icon for method, compound
# fname = {'file': 'file.png', 'peak': 'peak.png', \
# 'spectrum': 'spectrum.png'}
# loc = resfile('aston/ui', 'icons/' + fname.get(f.db_type, ''))
# rslt = QtGui.QIcon(loc)
return rslt
def setData(self, index, data, role):
data = str(data)
col = self.fields[index.column()].lower()
obj = index.internalPointer()
if col == 'sel':
# handle this slightly differently b/c it's in a diff table
#TODO: use the current palette
if data == '2':
self.master_window.pal_tab.add_run(obj)
else:
self.master_window.pal_tab.del_run(obj)
elif col == 'name':
obj.name = data
self.db.merge(obj)
self.db.commit()
elif type(obj) is Run and col != 'sel':
obj.info[col] = data
self.db.merge(obj)
self.db.commit()
self.dataChanged.emit(index, index)
return True
def flags(self, index):
col = self.fields[index.column()]
obj = index.internalPointer()
dflags = QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
dflags |= QtCore.Qt.ItemIsDropEnabled
if not index.isValid():
return dflags
dflags |= QtCore.Qt.ItemIsDragEnabled
if col == 'sel' and type(obj) is Run:
dflags |= QtCore.Qt.ItemIsUserCheckable
elif col in ['r-filename', 'vis']:
pass
else:
dflags |= QtCore.Qt.ItemIsEditable
return dflags
def itemSelected(self):
#TODO: update an info window?
#remove the current spectrum
self.master_window.plotter.clear_highlight()
#remove all of the peak patches from the
#main plot and add new ones in
sel = self.returnSelFile()
self.master_window.specplotter.libscans = []
if sel is not None:
if sel.db_type == 'file':
# self.master_window.plotter.clear_peaks()
# if sel.getInfo('vis') == 'y':
# self.master_window.plotter.add_peaks( \
# sel.getAllChildren('peak'))
pass
elif sel.db_type == 'peak':
if sel.parent_of_type('file').info['vis'] == 'y':
self.master_window.plotter.draw_highlight_peak(sel)
elif sel.db_type == 'spectrum':
self.master_window.specplotter.libscans = [sel.data]
self.master_window.specplotter.plot()
objs_sel = len(self.returnSelFiles())
self.master_window.show_status(str(objs_sel) + ' items selected')
def colsChanged(self, *_): # don't care about the args
flds = [self.fields[self.tree_view.header().logicalIndex(fld)] \
for fld in range(len(self.fields))]
self.db.set_key('main_cols', json.dumps(flds))
def click_main(self, point):
#index = self.proxy_mod.mapToSource(self.tree_view.indexAt(point))
menu = QtGui.QMenu(self.tree_view)
sel = self.returnSelFiles()
def _add_menu_opt(self, name, func, objs, menu):
ac = menu.addAction(name, self.click_handler)
ac.setData((func, objs))
#Things we can do with peaks
fts = [s for s in sel if s.db_type == 'peak']
if len(fts) > 0:
self._add_menu_opt(self.tr('Create Spec.'), \
self.createSpec, fts, menu)
self._add_menu_opt(self.tr('Merge Peaks'), \
self.merge_peaks, fts, menu)
fts = [s for s in sel if s.db_type in ('spectrum', 'peak')]
if len(fts) > 0:
self._add_menu_opt(self.tr('Find in Lib'), \
self.find_in_lib, fts, menu)
##Things we can do with files
#fts = [s for s in sel if s.db_type == 'file']
#if len(fts) > 0:
# self._add_menu_opt(self.tr('Copy Method'), \
# self.makeMethod, fts, menu)
#Things we can do with everything
if len(sel) > 0:
self._add_menu_opt(self.tr('Delete Items'), \
self.delete_objects, sel, menu)
#self._add_menu_opt(self.tr('Debug'), self.debug, sel)
if not menu.isEmpty():
menu.exec_(self.tree_view.mapToGlobal(point))
def click_handler(self):
func, objs = self.sender().data()
func(objs)
def delItemKey(self):
self.delete_objects(self.returnSelFiles())
def debug(self, objs):
pks = [o for o in objs if o.db_type == 'peak']
for pk in pks:
x = pk.data[:, 0]
y = pk.as_gaussian()
plt = self.master_window.plotter.plt
plt.plot(x, y, '-')
self.master_window.plotter.canvas.draw()
def merge_peaks(self, objs):
from aston.Math.Integrators import merge_ions
new_objs = merge_ions(objs)
self.delete_objects([o for o in objs if o not in new_objs])
def createSpec(self, objs):
with self.db:
for obj in objs:
obj._children += [obj.as_spectrum()]
def find_in_lib(self, objs):
for obj in objs:
if obj.db_type == 'peak':
spc = obj.as_spectrum().data
elif obj.db_type == 'spectrum':
spc = obj.data
lib_spc = self.master_window.cmpd_tab.db.find_spectrum(spc)
if lib_spc is not None:
obj.info['name'] = lib_spc.info['name']
obj.save_changes()
#def makeMethod(self, objs):
# self.master_window.cmpd_tab.addObjects(None, objs)
def click_head(self, point):
menu = QtGui.QMenu(self.tree_view)
subs = OrderedDict()
for n in aston_groups:
subs[n] = QtGui.QMenu(menu)
for fld in aston_fields:
if fld == 'name':
continue
grp = fld.split('-')[0]
if grp in subs:
ac = subs[grp].addAction(aston_fields[fld], \
self.click_head_handler)
else:
ac = menu.addAction(aston_fields[fld], \
self.click_head_handler)
ac.setData(fld)
ac.setCheckable(True)
if fld in self.fields:
ac.setChecked(True)
for grp in subs:
ac = menu.addAction(aston_groups[grp])
ac.setMenu(subs[grp])
menu.exec_(self.tree_view.mapToGlobal(point))
def click_head_handler(self):
fld = str(self.sender().data())
if fld == 'name':
return
if fld in self.fields:
indx = self.fields.index(fld)
self.beginRemoveColumns(QtCore.QModelIndex(), indx, indx)
for i in range(len(self.db._children)):
self.beginRemoveColumns( \
self.index(i, 0, QtCore.QModelIndex()), indx, indx)
self.fields.remove(fld)
for i in range(len(self.db._children) + 1):
self.endRemoveColumns()
else:
cols = len(self.fields)
self.beginInsertColumns(QtCore.QModelIndex(), cols, cols)
for i in range(len(self.db._children)):
self.beginInsertColumns( \
self.index(i, 0, QtCore.QModelIndex()), cols, cols)
self.tree_view.resizeColumnToContents(len(self.fields) - 1)
self.fields.append(fld)
for i in range(len(self.db._children) + 1):
self.endInsertColumns()
self.enableComboCols()
self.colsChanged()
#FIXME: selection needs to be updated to new col too?
#self.tree_view.selectionModel().selectionChanged.emit()
def _obj_to_index(self, obj):
if obj is None or obj == self.db:
return QtCore.QModelIndex()
elif obj in self.db._children:
row = self.db._children.index(obj)
else:
row = obj._parent._children.index(obj)
return self.createIndex(row, 0, obj)
def active_file(self):
"""
Returns the file currently selected in the file list.
If that file is not visible, return the topmost visible file.
Used for determing which spectra to display on right click, etc.
"""
dt = self.returnSelFile()
if dt is not None:
if dt.db_type == 'file' and dt.info['vis'] == 'y':
return dt
dts = self.returnChkFiles()
if len(dts) == 0:
return None
else:
return dts[0]
def returnChkFiles(self, node=None):
"""
Returns the files checked as visible in the file list.
"""
if node is None:
node = QtCore.QModelIndex()
chkFiles = []
for i in range(self.proxy_mod.rowCount(node)):
prjNode = self.proxy_mod.index(i, 0, node)
f = self.proxy_mod.mapToSource(prjNode).internalPointer()
if f.info['vis'] == 'y':
chkFiles.append(f)
if len(f._children) > 0:
chkFiles += self.returnChkFiles(prjNode)
return chkFiles
def returnSelFile(self):
"""
Returns the file currently selected in the file list.
Used for determing which spectra to display on right click, etc.
"""
tab_sel = self.tree_view.selectionModel()
if not tab_sel.currentIndex().isValid:
return
ind = self.proxy_mod.mapToSource(tab_sel.currentIndex())
if ind.internalPointer() is None:
return # it doesn't exist
return ind.internalPointer()
def returnSelFiles(self, cls=None):
"""
Returns the files currently selected in the file list.
Used for displaying the peak list, etc.
"""
tab_sel = self.tree_view.selectionModel()
files = []
for i in tab_sel.selectedRows():
obj = i.model().mapToSource(i).internalPointer()
if cls is None or obj.db_type == cls:
files.append(obj)
return files
def items_as_csv(self, itms, delim=',', incHeaders=True):
flds = [self.fields[self.tree_view.header().logicalIndex(fld)] \
for fld in range(len(self.fields))]
row_lst = []
block_col = ['vis']
for i in itms:
col_lst = [i.info[col] for col in flds \
if col not in block_col]
row_lst.append(delim.join(col_lst))
if incHeaders:
try: # for python 2
flds = [unicode(aston_fields[i]) for i in flds \
if i not in ['vis']]
except: # for python 3
flds = [aston_fields[i] for i in flds \
if i not in ['vis']]
header = delim.join(flds) + '\n'
table = '\n'.join(row_lst)
return header + table
| gpl-3.0 | 5,619,593,434,808,734,000 | 37.141962 | 80 | 0.559989 | false |
foolonhill/rietveld | third_party/oauth2client/keyring_storage.py | 273 | 3227 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A keyring based Storage.
A Storage for Credentials that uses the keyring module.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import keyring
import threading
from client import Storage as BaseStorage
from client import Credentials
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from the keyring.
To use this module you must have the keyring module installed. See
<http://pypi.python.org/pypi/keyring/>. This is an optional module and is not
installed with oauth2client by default because it does not work on all the
platforms that oauth2client supports, such as Google App Engine.
The keyring module <http://pypi.python.org/pypi/keyring/> is a cross-platform
library for access the keyring capabilities of the local system. The user will
be prompted for their keyring password when this module is used, and the
manner in which the user is prompted will vary per platform.
Usage:
from oauth2client.keyring_storage import Storage
s = Storage('name_of_application', 'user1')
credentials = s.get()
"""
def __init__(self, service_name, user_name):
"""Constructor.
Args:
service_name: string, The name of the service under which the credentials
are stored.
user_name: string, The name of the user to store credentials for.
"""
self._service_name = service_name
self._user_name = user_name
self._lock = threading.Lock()
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant."""
self._lock.acquire()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._lock.release()
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials
def locked_put(self, credentials):
"""Write Credentials to file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name,
credentials.to_json())
def locked_delete(self):
"""Delete Credentials file.
Args:
credentials: Credentials, the credentials to store.
"""
keyring.set_password(self._service_name, self._user_name, '')
| apache-2.0 | -8,485,844,601,790,202,000 | 28.605505 | 80 | 0.699721 | false |
Jumpscale/web | pythonlib/watchdog/utils/dirsnapshot.py | 3 | 8949 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
# Copyright 2014 Thomas Amland <thomas.amland@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.utils.dirsnapshot
:synopsis: Directory snapshots and comparison.
:author: yesudeep@google.com (Yesudeep Mangalapilly)
.. ADMONITION:: Where are the moved events? They "disappeared"
This implementation does not take partition boundaries
into consideration. It will only work when the directory
tree is entirely on the same file system. More specifically,
any part of the code that depends on inode numbers can
break if partition boundaries are crossed. In these cases,
the snapshot diff will represent file/directory movement as
created and deleted events.
Classes
-------
.. autoclass:: DirectorySnapshot
:members:
:show-inheritance:
.. autoclass:: DirectorySnapshotDiff
:members:
:show-inheritance:
"""
import os
from stat import S_ISDIR
from watchdog.utils import platform
from watchdog.utils import stat as default_stat
class DirectorySnapshotDiff(object):
"""
Compares two directory snapshots and creates an object that represents
the difference between the two snapshots.
:param ref:
The reference directory snapshot.
:type ref:
:class:`DirectorySnapshot`
:param snapshot:
The directory snapshot which will be compared
with the reference snapshot.
:type snapshot:
:class:`DirectorySnapshot`
"""
def __init__(self, ref, snapshot):
created = snapshot.paths - ref.paths
deleted = ref.paths - snapshot.paths
# check that all unchanged paths have the same inode
for path in ref.paths & snapshot.paths:
if ref.inode(path) != snapshot.inode(path):
created.add(path)
deleted.add(path)
# find moved paths
moved = set()
for path in set(deleted):
inode = ref.inode(path)
new_path = snapshot.path(inode)
if new_path:
# file is not deleted but moved
deleted.remove(path)
moved.add((path, new_path))
for path in set(created):
inode = snapshot.inode(path)
old_path = ref.path(inode)
if old_path:
created.remove(path)
moved.add((old_path, path))
# find modified paths
# first check paths that have not moved
modified = set()
for path in ref.paths & snapshot.paths:
if ref.inode(path) == snapshot.inode(path):
if ref.mtime(path) != snapshot.mtime(path):
modified.add(path)
for (old_path, new_path) in moved:
if ref.mtime(old_path) != snapshot.mtime(new_path):
modified.add(old_path)
self._dirs_created = [path for path in created if snapshot.isdir(path)]
self._dirs_deleted = [path for path in deleted if ref.isdir(path)]
self._dirs_modified = [path for path in modified if ref.isdir(path)]
self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)]
self._files_created = list(created - set(self._dirs_created))
self._files_deleted = list(deleted - set(self._dirs_deleted))
self._files_modified = list(modified - set(self._dirs_modified))
self._files_moved = list(moved - set(self._dirs_moved))
@property
def files_created(self):
"""List of files that were created."""
return self._files_created
@property
def files_deleted(self):
"""List of files that were deleted."""
return self._files_deleted
@property
def files_modified(self):
"""List of files that were modified."""
return self._files_modified
@property
def files_moved(self):
"""
List of files that were moved.
Each event is a two-tuple the first item of which is the path
that has been renamed to the second item in the tuple.
"""
return self._files_moved
@property
def dirs_modified(self):
"""
List of directories that were modified.
"""
return self._dirs_modified
@property
def dirs_moved(self):
"""
List of directories that were moved.
Each event is a two-tuple the first item of which is the path
that has been renamed to the second item in the tuple.
"""
return self._dirs_moved
@property
def dirs_deleted(self):
"""
List of directories that were deleted.
"""
return self._dirs_deleted
@property
def dirs_created(self):
"""
List of directories that were created.
"""
return self._dirs_created
class DirectorySnapshot(object):
"""
A snapshot of stat information of files in a directory.
:param path:
The directory path for which a snapshot should be taken.
:type path:
``str``
:param recursive:
``True`` if the entire directory tree should be included in the
snapshot; ``False`` otherwise.
:type recursive:
``bool``
:param walker_callback:
.. deprecated:: 0.7.2
:param stat:
Use custom stat function that returns a stat structure for path.
Currently only st_dev, st_ino, st_mode and st_mtime are needed.
A function with the signature ``walker_callback(path, stat_info)``
which will be called for every entry in the directory tree.
:param listdir:
Use custom listdir function. See ``os.listdir`` for details.
"""
def __init__(self, path, recursive=True,
walker_callback=(lambda p, s: None),
stat=default_stat,
listdir=os.listdir):
self._stat_info = {}
self._inode_to_path = {}
st = stat(path)
self._stat_info[path] = st
self._inode_to_path[(st.st_ino, st.st_dev)] = path
def walk(root):
paths = [os.path.join(root, name) for name in listdir(root)]
entries = []
for p in paths:
try:
entries.append((p, stat(p)))
except OSError:
continue
for _ in entries:
yield _
if recursive:
for path, st in entries:
if S_ISDIR(st.st_mode):
for _ in walk(path):
yield _
for p, st in walk(path):
i = (st.st_ino, st.st_dev)
self._inode_to_path[i] = p
self._stat_info[p] = st
walker_callback(p, st)
@property
def paths(self):
"""
Set of file/directory paths in the snapshot.
"""
return set(self._stat_info.keys())
def path(self, id):
"""
Returns path for id. None if id is unknown to this snapshot.
"""
return self._inode_to_path.get(id)
def inode(self, path):
""" Returns an id for path. """
st = self._stat_info[path]
return (st.st_ino, st.st_dev)
def isdir(self, path):
return S_ISDIR(self._stat_info[path].st_mode)
def mtime(self, path):
return self._stat_info[path].st_mtime
def stat_info(self, path):
"""
Returns a stat information object for the specified path from
the snapshot.
Attached information is subject to change. Do not use unless
you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`,
:func:`isdir` instead.
:param path:
The path for which stat information should be obtained
from a snapshot.
"""
return self._stat_info[path]
def __sub__(self, previous_dirsnap):
"""Allow subtracting a DirectorySnapshot object instance from
another.
:returns:
A :class:`DirectorySnapshotDiff` object.
"""
return DirectorySnapshotDiff(previous_dirsnap, self)
def __str__(self):
return self.__repr__()
def __repr__(self):
return str(self._stat_info)
| apache-2.0 | -3,603,700,851,997,609,000 | 30.510563 | 79 | 0.586546 | false |
Syrcon/servo | tests/wpt/css-tests/tools/webdriver/webdriver/searchcontext.py | 251 | 2153 | """WebDriver element location functionality."""
class SearchContext(object):
"""Abstract class that provides the core element location functionality."""
def find_element_by_css(self, selector):
"""Find the first element matching a css selector."""
return self._find_element('css selector', selector)
def find_elements_by_css(self, selector):
"""Find all elements matching a css selector."""
return self._find_elements('css selector', selector)
def find_element_by_link_text(self, text):
"""Find the first link with the given text."""
return self._find_element('link text', text)
def find_elements_by_link_text(self, text):
"""Find all links with the given text."""
return self._find_elements('link text', text)
def find_element_by_partial_link_text(self, text):
"""Find the first link containing the given text."""
return self._find_element('partial link text', text)
def find_elements_by_partial_link_text(self, text):
"""Find all links containing the given text."""
return self._find_elements('partial link text', text)
def find_element_by_xpath(self, xpath):
"""Find the first element matching the xpath."""
return self._find_element('xpath', xpath)
def find_elements_by_xpath(self, xpath):
"""Find all elements matching the xpath."""
return self._find_elements('xpath', xpath)
def _find_element(self, strategy, value):
return self.execute('POST',
'/element',
'findElement',
self._get_locator(strategy, value))
def _find_elements(self, strategy, value):
return self.execute('POST',
'/elements',
'findElements',
self._get_locator(strategy, value))
def _get_locator(self, strategy, value):
if self.mode == 'strict':
return {'strategy': strategy, 'value': value}
elif self.mode == 'compatibility':
return {'using': strategy, 'value': value}
| mpl-2.0 | 7,807,555,079,920,232,000 | 38.87037 | 79 | 0.595448 | false |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/modules/ui_utils.py | 6 | 1405 | # -*- coding: utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
"""
ui_utils.py
Some UI utility functions
"""
class GUI:
@classmethod
def drawIconButton(cls, enabled, layout, iconName, operator, frame=True):
col = layout.column()
col.enabled = enabled
bt = col.operator(operator,
text='',
icon=iconName,
emboss=frame)
@classmethod
def drawTextButton(cls, enabled, layout, text, operator, frame=True):
col = layout.column()
col.enabled = enabled
bt = col.operator(operator,
text=text,
emboss=frame)
| gpl-3.0 | -3,475,608,645,145,878,000 | 26.54902 | 77 | 0.651957 | false |
luci/luci-py | appengine/isolate/proto/isolated_prpc_pb2.py | 2 | 7397 | # Generated by the pRPC protocol buffer compiler plugin. DO NOT EDIT!
# source: isolated.proto
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the isolated.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'eJy9Ws1vG0l2H4qWTLc1njbHMyNrvmo1q7E4Q5H6sMdrabJYSqTk9lCklh/22oOB3WoWpd5pdn'
'PZTcmar0OQwy4SLJB7guzmuPf8AbkGOQcIcsklQP6BADkF+b1X1c2WLdvJBogg21WvXr2vevU+'
'qm385fvGFTcMPDuSvdJwFERB/nIyP16d//AwCA49Wealg3G/HLkDGUb2YKiwF36fMWbbkR2FLf'
'mrMVbym8Zl2h1Gjwl1LiMyS5fX5kuKTimmU+rEdFqGQidA/rZhjCTYjyM38OemsPfK2jullECl'
'VrLcSqHm3zUuDe1D+Th0v5FzWeybbuUI0MZ8oWm8rkUMh4EfyvxPjdmBtMPxSA6kH4UQMstCph'
'nxjrZvD8OjIGqdwV/41ylNMV7P3zEMaDP6Hyt9ibFZ5znj4njoBXYvZIWzrXia/8h4XQ8fH5zC'
'RqxYtjWrgVsEy79nXOoFJ74icIERJoD8DeONZKKJTDPOlQSsyHxqXHUCP7JdP3w8UkcZzs0wqh'
'kv6CMO8wUjgT32guDr8TCcu8i4b8TwugLn541cQi7HKMmc1vq268Gs4dwltRbPP9k1jMlRA/Pt'
'Vq3drHc7VrPxuNto79e2rR2rVjVfyxvGzJ7V6HZqZiafMy7cbXZb5lT+opGtVh6a2bW6kbP0se'
'Z/ZkzzseWvP3/UWrn5c7wg9puF1+7989tGzrxgvmbumhnj7zK5WZ7k1/6QEdvB8HTkHh5FYm1l'
'9SeicyRFvbttico4OgpGYUlUPE8wQiigoRwdg4UhuqEUQV9ER24owmA8cqRwgp4UmB4Gx3Lky5'
'4Y+z05AooUlaHtEGHXkRCoKO7LUQgDibXSigEEOxKO7YsDKfoBNgnX5111a7vWaNdE34U3GkYu'
'N2XOQOxrGGZzr5k5jAvGbG4GYwPjK9CNZ7kM5jnTNL7k2RTWZ80p8+b8F4INAz2i8ciH4Ji5Ye'
'Q6IRiPlDJ0E8TI9g+lgFgkhj54KDS5uRDHjImD3SwEu5yCTAEya86lIFnMPzJXjM9z0xDnKsR9'
'28zMl8TEW0QUiHEoWZI2zAxblWJxWQDwnKXd4HfVnDbfMK7wjNTLQ733wW06kYggcynIFCDvmu'
'8lezLmm8B4PcHIaEguBZkC5LI5m+yZMq8B43KCQVQIMpOCEM4l00j2ZM23gGEkGFnsIch0CjIF'
'SM68ZHyauwBdrsM2C7DN+0L7NlskvgzKJGyKC6zodWy9ZvxLhqdki/dBvTj/DxmhAjR5JJ2iZ2'
'OMMDg6JUPD1cKhdNy+KxN3i80srL7wg2iCUDRiWj3Zt8cejgQkaIszHo1Ak52G932nEL9jrkEA'
'mg6tq38M5u/KUJy4uFSQYeCGoesfCptlPBX2SDLrU4kbEQyGY9KYjKeVy82weu+mIBlA3jNvpC'
'BZQD4xPzVuaEjG/BB73pt/Jzbo856sEUGeUGdTENr8uvlOCpIFZB4iVDVkyvwR9rw9f1Ps2U/d'
'wXgg/PHgABcf4eFMtkm0jwJ9A1O8p3LTTOdiCpIBJGdeTUGygFwz32JPyZiLFAC0p6hQRzzP9Z'
'QMo+ew9a8zPCVPWQK/T+b/IsNi6jgQxsLCKUYSgQxEnaNR4AdecOg6tieCEaIaDq1EsRLnpSPX'
'UeDJ0Ih9yQO5TRqN5I1Q2HAczz4FGlxZxk4Rm4MO3RlJe3LWLF7uAguYhswAcpmvdQzJAHLdXE'
'xBsoAsIS5uAIKLoGL+fPGZo4jznQhVpEmFQm0xMn+RLVbgGRmsBHk+nb9OtEbRJPxrolr4KS1q'
'KXHUKS1qCY76cQqSBaRgfmLsa0jGXMWea/M/E43Eg1y/5x67vTEMr+sHnS9wOzwZxbdXB+7+yB'
'7IlBwZeNVq4lVT2qNXodYbKUgWkLz5pvFXGQ2aMm9h0/z8n2dSknAZopif0LmHY8eRYdgfe96p'
'lu0F0sBRADm2vTGd/ZCyqB+pqERqYB5iJ9MvCrefACkpnCA0ICv0UkrRVbl1Riky3S0+qwkkC8'
'iceT0xbta8zdf0BcZN6iqlIdd5rzBuFnLcPiMHBffbyZVVEOJLV/ZvYuNeMDc5HP32pcbV/FWo'
'joX7/7HvBei1eUYvCjab0OudFCQLCIXBLzRk2vwp9nwwv5nSqpxcs7h4FCiqHSXPy0w7rcldTE'
'EygORwoBNIFpD3kPcHGjJjVqgOmP9KdIIIZzqJw24kB6GgmpfKsiFnVGUnXH0dshwq0Yo0sw8P'
'R/LQpuxgCNsZBSGCFBJWa387TAk5AyErZ4ScgZAVCDmXgmQBocLjcw25aFax5y1EpGeFTGyEQ0'
'egdcYUxsXP99spnhfBs3qG50XwrHK1N4FkAXkTVcGyhuTMHeb5fups/MBfXltZSbimmOTAZOcM'
'kxyY7JxhkgOTHWJyMMN90rrxX2XjVW1n/o1nWquFTeNS0l1RSxVKuEwv5FYMLZWe5q8Z077tB6'
'rVmm6pydYPxptw5Wfbta0rCcV9Au1nHn166EZH44MSsMuHyI7+4UTEYXQ6lOFE0v/MZP52Kru7'
'v/WHqQ92FeX9uBF8ID3vCx+XsUN77v1ZybhkfoAM85sMSu9/nEVb8QG3FX8/K3iPE3hia9zvI4'
'uKZaGoIR327MiGn0Vy5BxxoQ13HNgoj9K9yMpP9AZh+U5JvKANOYqiYbhRLveQqr1gCEaxOUjX'
'oRZi+UAJUUasaMke8tzIPVBFt412g6puuL1uYwhy4Po2CkWSCyHjBNZD2ud/gzHkHAQ9FIYO35'
'Ai529wHrgRRSzwREil0HWk+4d+4HnBCZV5dJgubeKkDzoy2oBI9PPJM4KF5KPpxmowDqmOpHDC'
'VO0DtFlY0hYzqG5EIi+qoEL1B1FIc/R7z4gDfo5n4+BHpRcJAWYpW8RCQMfeGIIlchgTQf5Pci'
'T1Uy9wxvRoYceHVIb9A6qnBDwF9ZfthRNT8wFh0RBp6ROlGtLlnUTYR6AlgdK+5QeTtVCFy9Dg'
'EolJoQsGU6reOFNQeJJ+L6DKkMNoMAgQp5RN4J1UHcI5RR8LRtwg96MTchPtQXFr4WCXS441It'
'/xlRdxXuKsdtdqi3Zzp/Og0qoJjPdbzftWtVYVWw+xWBPbzf2HLWv3bkfcbdartVZbVBpVQBud'
'lrXV7TRbbUMsVNrYusArlcZDUfvFfqvWbotmS1h7+3UL1EC+VWl0rFq7KKzGdr1btRq7RQEKot'
'HsGGjD96wO8DrNIrN9fp9o7oi9Wmv7LqaVLatudR4ywx2r0yBmO82WISpiv9LqWNvdeqUl9rut'
'/SZae9KsarW36xVrr1ZFC9UAT1G7X2t0RPtupV4/q6ghmg8atRZJn1ZTbNUgZWWrXiNWrGfVat'
'W2O6TQZLQN40HAOvo5fonBCPaoQZ1K62FRE23Xft4FFhZFtbJX2YV2S6+yCg5mu9uq7ZHUMEW7'
'u9XuWJ1upyZ2m80qG7tda923tmvtTVFvttlg3XYNglQrnQqzBg2YC+sYb3XbFhvOanRqrVZ3n9'
'6PCjjlB7AMpKxgb5Ut3GyQtuQrtWbrIZElO/AJFMWDuzXAW2RUtlaFzNCG1bY7aTQwhBGh0kRP'
'0ajt1q3dWmO7RstNIvPAatcKODCrTQgWM4YPgGmXtaaDglyGGqdct8jnKawdUanet0hyjQ0PaF'
'vaXdhs23e1zfVrj0A2mcMoZy5gtEnA3KIeE/QjjD5k6Id6TNAfY7TF0Mt6TFBqFosMzegxQT/G'
'qMzQeEyjG/QEwVBDjwm6hNGPGPpjPf6Pd/nF4gedAuf/7V14eZJ907WoLYYBMh6HN3pgQuUth4'
'gi1Aci4Nj+qYJ/E/gcVbwAFZBBZRCQ7FEREYeyQI9eCmyEprHap+sDjqmoIZ1J5ogXKDFQscDz'
'dNvPL32KkEspFcUWwhYFN/Szchg4R/T+1e1si4Hb8zmyUzl4z/bHlA5Wi2L1zu2VYhywEf48OU'
'TkF7uoHQMEaD+RXpwcuSAnnyLGcYOBQH0O1oHtfI0o2eMC8FQCAmNQIKTUP3D9caSb5c9WEv28'
'gBrxurSHE5WBsRAOsF/2FhB6VSL2A+EBy9BoIrIPPH6t9KUku1JFzCXJkHKsSuxj9TYjvly7uY'
'ywjVNxfZAFDaL+1dLLiw86zzJjFlSPEj8rginlBrGysrK6zL+dlZUN/n1Eqt/Bz/Lq2vL6amdt'
'fePWHfyW7sQ/j0pi69Sgg0RywnlDwEiryNRRrUg4Cz36x+0Uv1BA6WOJpp3PVyUn8WVrZ9sQ6+'
'vrdya6nJyclFwZ9UvB6LA86jv0hzBK0dOoQJUb9Wb0/EEFs/hI1J7a1IiHmOihWN1AIcdvV6m7'
'wAxx4a1fiCdkmaXCk5IufSZISRG6qVYm5XMoo8f6gJd4e6NbrxcK5+Kxvy+tYHEi09qrZDqUEV'
'EJ+j37NCUbdEVSZwboL0V0rDmeQf84Oi4KFmjzj1XpuBQd0+xlGikklCAOappVeM8ZDddfqOED'
'119fE092ZdQ+DdEI0nIl3HE92Tl7EDtWvdZBHhb9SIvxoj0f96NY0i5y1Gc3IbDzdSj+RCwtLS'
'lIoR+Veid3ETiqcBraVRCffy7W1wriO8Fr9eAkXortVi4jgEJeNPwhk6TLAlVTMSwsJQgqSq1+'
'9vw1SqjR9tXPbt68eXv9s5VJ2NAPcV3ffRpTQTB7lkrpjzvMJaU/TKGMUubDop8CuqCUOK/wYK'
'JD5orpLKbosAMUzjjAzRc6wD372BZP1EGW9JM1oey5HurzlANQNEWkJSiO8sUbXuLm2JdAS748'
'2Rq7HiripQIp1tYW0iyUYQqKFv0QTkPpjlhMmmtMpbpWmy1QKB0QZZZlYoNbL7RB+qWectb+KS'
'pxP1b8XPGXCs+eDa7D9sQaWKcIeK+NImzPHg4RFAGwfAVRPW2Rk2PKTmicySnPpHMVUHUmNTgs'
'/6+ismJFGd2mZF5UZBSUmC18S9n0++VvB2hpjvAvgtb3nW8ppX2/8S0yK/6G837/ZelbKiLIkb'
'//6tGCgbxNb3FqNxGyvRP7FMI/jd/UVIbsU27suYdomyjV4xw0p6JgVihzFTPMiVuRUxCz5Gz9'
'jRwFy0O711PNVXQSxNSk7RypSiWubqgq0hetqOsKSm+HAT1rUfKMty65JVnSwNXza6ACBCP+wV'
'BRVpwWHqFqGPf7CA30PurY6mVSsh9wfba0gLJoobB5BmqoMupXYxd1RwlhTD0LKWcIuWN1oagI'
'YQmvF5uSnh6oxlqyw4QbPVUaJEaBDsCnHtFXif55VyJD2mdYDW00pgmbA8jFlQ7lfcdBhSYO0E'
'YzT9qrWupYh/A5OagYDPp93EsuYnZQJUl114piYW1l9TbFzNVbnZXVjfWVjdVbpZVVmE95N0Iv'
'zZOgO7RDVKOMyfzR2CfV5K0ifX6+XdIXCAGr7YzcIe4PGTxdwNiCkoYIDn4pnUjVPm4YO7vyR3'
'Z/GImqyh7uUxRY7WabL9lS4ZyyrTQIvkGcsfl2SX+52y73AicsP5AH5Yko5Zbs4zr4jizvesGB'
'7T1usgxhmQQqp5gU+GXnKIAbWHGkKfI9VyKJJ1RH8TfDePAkVkh/TdLaSoh/nopQ6gmiRp+3pj'
'SC1KWhimyky1rZcw9GMDAXo6WjaOB9xKN4b4FfJIzEkWMm9D4hbiw+XF4cLC/2Oot3Nxb3Nhbb'
'pcX+oxsot92v5YkbqsdiMtDklODPitq9oGezs94IIStME6f6HRWsenqK7PPVknrH03Hul9jJ0t'
'Ngmatoe+jygcRQVVsrWcvP02Y9YwaLa1X8GqJAhgwO+P3M1nqi3IfUQ74gaJoOpS9Htrpq8TUL'
'k3d6HWWRbpJvzz/wt47fTb49/2mGP2D9NiNak94v9n9wILdnO+MQnXT9YZxfgIg9enCj/xfxko'
'bBOK9jeAS5HQ/Ockwt1NXk4/C0kvFiCpQhEH0Rm4CyBKJPYv8e65Yxf0378vP/lBGNwF/2+QPB'
'sTzbdtpxe0Ud1/ltZ0NvTDox/m4Tqje8CTF+aQwj+k5+ZB9Lfq5PeDJpvTH+/yPcyeKMqIOM2+'
'xn7ae7q6L+Y5xrI/pm+OuzNsoo9XPm6ylQlkCmeTX+APDfdjTQIQ==')))
_INDEX = {
f.name: {
'descriptor': f,
'services': {s.name: s for s in f.service},
}
for f in FILE_DESCRIPTOR_SET.file
}
IsolatedServiceDescription = {
'file_descriptor_set': FILE_DESCRIPTOR_SET,
'file_descriptor': _INDEX[u'isolated.proto']['descriptor'],
'service_descriptor': _INDEX[u'isolated.proto']['services'][u'Isolated'],
}
| apache-2.0 | -6,725,266,536,144,388,000 | 67.490741 | 80 | 0.871029 | false |
mspark93/VTK | ThirdParty/Twisted/twisted/test/test_stateful.py | 34 | 2009 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols.stateful
"""
from twisted.trial.unittest import TestCase
from twisted.protocols.test import test_basic
from twisted.protocols.stateful import StatefulProtocol
from struct import pack, unpack, calcsize
class MyInt32StringReceiver(StatefulProtocol):
"""
A stateful Int32StringReceiver.
"""
MAX_LENGTH = 99999
structFormat = "!I"
prefixLength = calcsize(structFormat)
def getInitialState(self):
return self._getHeader, 4
def lengthLimitExceeded(self, length):
self.transport.loseConnection()
def _getHeader(self, msg):
length, = unpack("!i", msg)
if length > self.MAX_LENGTH:
self.lengthLimitExceeded(length)
return
return self._getString, length
def _getString(self, msg):
self.stringReceived(msg)
return self._getHeader, 4
def stringReceived(self, msg):
"""
Override this.
"""
raise NotImplementedError
def sendString(self, data):
"""
Send an int32-prefixed string to the other end of the connection.
"""
self.transport.write(pack(self.structFormat, len(data)) + data)
class TestInt32(MyInt32StringReceiver):
def connectionMade(self):
self.received = []
def stringReceived(self, s):
self.received.append(s)
MAX_LENGTH = 50
closed = 0
def connectionLost(self, reason):
self.closed = 1
class Int32TestCase(TestCase, test_basic.IntNTestCaseMixin):
protocol = TestInt32
strings = ["a", "b" * 16]
illegalStrings = ["\x10\x00\x00\x00aaaaaa"]
partialStrings = ["\x00\x00\x00", "hello there", ""]
def test_bigReceive(self):
r = self.getProtocol()
big = ""
for s in self.strings * 4:
big += pack("!i", len(s)) + s
r.dataReceived(big)
self.assertEqual(r.received, self.strings * 4)
| bsd-3-clause | -5,977,447,755,338,609,000 | 23.802469 | 73 | 0.634146 | false |
datenbetrieb/odoo | addons/marketing_campaign/__openerp__.py | 260 | 3127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Marketing Campaigns',
'version': '1.1',
'depends': ['marketing',
'document',
'email_template',
'decimal_precision'
],
'author': 'OpenERP SA',
'category': 'Marketing',
'description': """
This module provides leads automation through marketing campaigns (campaigns can in fact be defined on any resource, not just CRM Leads).
=========================================================================================================================================
The campaigns are dynamic and multi-channels. The process is as follows:
------------------------------------------------------------------------
* Design marketing campaigns like workflows, including email templates to
send, reports to print and send by email, custom actions
* Define input segments that will select the items that should enter the
campaign (e.g leads from certain countries.)
* Run your campaign in simulation mode to test it real-time or accelerated,
and fine-tune it
* You may also start the real campaign in manual mode, where each action
requires manual validation
* Finally launch your campaign live, and watch the statistics as the
campaign does everything fully automatically.
While the campaign runs you can of course continue to fine-tune the parameters,
input segments, workflow.
**Note:** If you need demo data, you can install the marketing_campaign_crm_demo
module, but this will also install the CRM application as it depends on
CRM Leads.
""",
'website': 'https://www.odoo.com/page/lead-automation',
'data': [
'marketing_campaign_view.xml',
'marketing_campaign_data.xml',
'marketing_campaign_workflow.xml',
'report/campaign_analysis_view.xml',
'security/marketing_campaign_security.xml',
'security/ir.model.access.csv'
],
'demo': ['marketing_campaign_demo.xml'],
'test': ['test/marketing_campaign.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,702,719,564,366,317,000 | 43.042254 | 137 | 0.606652 | false |
tsvstar/vk_downloader | vk/tests.py | 1 | 1723 | # coding=utf8
import os
import sys
import time
import unittest
import vk
from vk.utils import HandyList, make_handy, HandyDict
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
# copy to test_props.py and fill it
APP_ID = '4643961' # aka API/Client id
USER_LOGIN = 'tsvstar@gmail.com' # user email or phone number
USER_PASSWORD = 'byyfrbtd'
from test_props import APP_ID, USER_LOGIN, USER_PASSWORD
class VkTestCase(unittest.TestCase):
def setUp(self):
self.vk_api = vk.API(APP_ID, USER_LOGIN, USER_PASSWORD)
self.vk_token_api = vk.API(access_token=self.vk_api.access_token)
def test_get_server_time(self):
time_1 = time.time() - 1
time_2 = time_1 + 10
server_time = self.vk_api.getServerTime()
self.assertTrue(time_1 <= server_time <= time_2)
def test_get_server_time_via_token_api(self):
time_1 = time.time() - 1
time_2 = time_1 + 10
server_time = self.vk_token_api.getServerTime()
self.assertTrue(time_1 <= server_time <= time_2)
def test_get_profiles_via_token(self):
profiles = self.vk_api.users.get(user_id=1)
profiles = make_handy(profiles)
self.assertEqual(profiles.first.last_name, u'Дуров')
class HandyContainersTestCase(unittest.TestCase):
def test_list(self):
handy_list = make_handy([1, 2, 3])
self.assertIsInstance(handy_list, HandyList)
self.assertEqual(handy_list.first, 1)
def test_handy_dict(self):
handy_dict = make_handy({'key1': 'val1', 'key2': 'val2'})
self.assertIsInstance(handy_dict, HandyDict)
self.assertEqual(handy_dict.key1, 'val1')
if __name__ == '__main__':
unittest.main()
| mit | 3,941,724,729,523,370,000 | 27.163934 | 73 | 0.6461 | false |
dongguangming/pygal | pygal/test/test_bar.py | 6 | 1221 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2015 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
"""Bar chart related tests"""
from pygal import Bar
def test_simple_bar():
"""Simple bar test"""
bar = Bar()
rng = [-3, -32, -39]
bar.add('test1', rng)
bar.add('test2', map(abs, rng))
bar.x_labels = map(str, rng)
bar.title = "Bar test"
q = bar.render_pyquery()
assert len(q(".axis.x")) == 1
assert len(q(".axis.y")) == 1
assert len(q(".legend")) == 2
assert len(q(".plot .series rect")) == 2 * 3
| lgpl-3.0 | 7,964,814,917,312,821,000 | 31.972973 | 79 | 0.67377 | false |
xiandaicxsj/qemu-copy | scripts/qtest.py | 34 | 1911 | # QEMU qtest library
#
# Copyright (C) 2015 Red Hat Inc.
#
# Authors:
# Fam Zheng <famz@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Based on qmp.py.
#
import errno
import socket
class QEMUQtestProtocol(object):
def __init__(self, address, server=False):
"""
Create a QEMUQtestProtocol object.
@param address: QEMU address, can be either a unix socket path (string)
or a tuple in the form ( address, port ) for a TCP
connection
@param server: server mode, listens on the socket (bool)
@raise socket.error on socket connection errors
@note No connection is established, this is done by the connect() or
accept() methods
"""
self._address = address
self._sock = self._get_sock()
if server:
self._sock.bind(self._address)
self._sock.listen(1)
def _get_sock(self):
if isinstance(self._address, tuple):
family = socket.AF_INET
else:
family = socket.AF_UNIX
return socket.socket(family, socket.SOCK_STREAM)
def connect(self):
"""
Connect to the qtest socket.
@raise socket.error on socket connection errors
"""
self._sock.connect(self._address)
def accept(self):
"""
Await connection from QEMU.
@raise socket.error on socket connection errors
"""
self._sock, _ = self._sock.accept()
def cmd(self, qtest_cmd):
"""
Send a qtest command on the wire.
@param qtest_cmd: qtest command text to be sent
"""
self._sock.sendall(qtest_cmd + "\n")
def close(self):
self._sock.close()
def settimeout(self, timeout):
self._sock.settimeout(timeout)
| gpl-2.0 | 8,801,474,164,776,954,000 | 25.915493 | 79 | 0.581894 | false |
gvb/odoo | addons/account_budget/wizard/account_budget_crossovered_report.py | 375 | 2089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_budget_crossvered_report(osv.osv_memory):
_name = "account.budget.crossvered.report"
_description = "Account Budget crossvered report"
_columns = {
'date_from': fields.date('Start of period', required=True),
'date_to': fields.date('End of period', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
datas = {
'ids': context.get('active_ids', []),
'model': 'crossovered.budget',
'form': data
}
datas['form']['ids'] = datas['ids']
datas['form']['report'] = 'analytic-full'
return self.pool['report'].get_action(cr, uid, [], 'account_budget.report_crossoveredbudget', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,865,789,457,604,590,000 | 39.173077 | 130 | 0.595979 | false |
GitAngel/django | tests/raw_query/models.py | 150 | 1034 | from django.db import models
class Author(models.Model):
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
dob = models.DateField()
def __init__(self, *args, **kwargs):
super(Author, self).__init__(*args, **kwargs)
# Protect against annotations being passed to __init__ --
# this'll make the test suite get angry if annotations aren't
# treated differently than fields.
for k in kwargs:
assert k in [f.attname for f in self._meta.fields], \
"Author.__init__ got an unexpected parameter: %s" % k
class Book(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(Author)
paperback = models.BooleanField(default=False)
opening_line = models.TextField()
class Coffee(models.Model):
brand = models.CharField(max_length=255, db_column="name")
class Reviewer(models.Model):
reviewed = models.ManyToManyField(Book)
class FriendlyAuthor(Author):
pass
| bsd-3-clause | 2,459,366,505,848,057,000 | 28.542857 | 69 | 0.662476 | false |
CredoReference/edx-platform | common/djangoapps/credo_modules/models.py | 1 | 24536 | import logging
import time
import datetime
import json
import re
import uuid
from urlparse import urlparse
from django.dispatch import receiver
from django.contrib.auth.models import User
from django.db import models, IntegrityError, OperationalError, transaction
from django.db.models import F, Value
from django.db.models.functions import Concat
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey, UsageKey
from django.core.exceptions import ValidationError
from django.core.validators import URLValidator
from django.utils.timezone import utc
from model_utils.models import TimeStampedModel
from credo_modules.utils import additional_profile_fields_hash
from student.models import CourseEnrollment, CourseAccessRole, ENROLL_STATUS_CHANGE, EnrollStatusChange
log = logging.getLogger("course_usage")
class CredoModulesUserProfile(models.Model):
"""
This table contains info about the credo modules student.
"""
class Meta(object):
db_table = "credo_modules_userprofile"
ordering = ('user', 'course_id')
unique_together = (('user', 'course_id'),)
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True)
meta = models.TextField(blank=True) # JSON dictionary
fields_version = models.CharField(max_length=80)
@classmethod
def users_with_additional_profile(cls, course_id):
profiles = cls.objects.filter(course_id=course_id)
result = {}
for profile in profiles:
result[profile.user_id] = json.loads(profile.meta)
return result
def converted_meta(self):
try:
meta_dict = json.loads(self.meta)
except ValueError:
meta_dict = {}
return meta_dict
class StudentAttributesRegistrationModel(object):
"""
Helper model-like object to save registration properties.
"""
data = None
user = None
def __init__(self, data):
self.data = data
def save(self):
if self.data:
for values in self.data:
values['user'] = self.user
CredoStudentProperties(**values).save()
def check_and_save_enrollment_attributes(post_data, user, course_id):
try:
properties = EnrollmentPropertiesPerCourse.objects.get(course_id=course_id)
try:
enrollment_properties = json.loads(properties.data)
except ValueError:
return
if enrollment_properties:
CredoStudentProperties.objects.filter(course_id=course_id, user=user).delete()
for k, v in enrollment_properties.iteritems():
lti_key = v['lti'] if 'lti' in v else False
default = v['default'] if 'default' in v and v['default'] else None
if lti_key:
if lti_key in post_data:
CredoStudentProperties(user=user, course_id=course_id,
name=k, value=post_data[lti_key]).save()
elif default:
CredoStudentProperties(user=user, course_id=course_id,
name=k, value=default).save()
set_custom_term(course_id, user)
except EnrollmentPropertiesPerCourse.DoesNotExist:
return
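# Illustrative sketch of the JSON expected in EnrollmentPropertiesPerCourse.data,
# inferred from the loop above; the concrete property names and LTI keys below
# are hypothetical:
#
#   {
#       "school": {"lti": "custom_school", "default": "Unknown School"},
#       "program": {"default": "General Studies"}
#   }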
def get_custom_term():
return datetime.datetime.now().strftime("%B %Y")
def save_custom_term_student_property(term, user, course_id):
return CredoStudentProperties.objects.get_or_create(user=user, course_id=course_id, name='term',
defaults={'value': term})
class CredoStudentProperties(models.Model):
"""
This table contains info about the custom student properties.
"""
class Meta(object):
db_table = "credo_student_properties"
ordering = ('user', 'course_id', 'name')
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True, null=True, blank=True)
name = models.CharField(max_length=255, db_index=True)
value = models.CharField(max_length=255)
def validate_json_props(value):
try:
json_data = json.loads(value)
if json_data:
for key in json_data:
if not re.match(r'\w+$', key):
raise ValidationError(
'%(key)s should contain only alphanumeric characters and underscores',
params={'key': key},
)
except ValueError:
raise ValidationError('Invalid JSON')
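# A minimal sketch of what validate_json_props accepts and rejects (the sample
# values are hypothetical):
#
#   validate_json_props('{"course_term": "Fall"}')  # ok: key matches \w+$
#   validate_json_props('{"bad key!": 1}')          # raises ValidationError
#   validate_json_props('not json')                 # raises ValidationError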
class RegistrationPropertiesPerMicrosite(models.Model):
org = models.CharField(max_length=255, verbose_name='Org', unique=True)
domain = models.CharField(max_length=255, verbose_name='Microsite Domain Name', unique=True)
data = models.TextField(
verbose_name="Registration Properties",
help_text="Config in JSON format",
validators=[validate_json_props]
)
class Meta(object):
db_table = "credo_registration_properties"
verbose_name = "registration properties item"
verbose_name_plural = "registration properties per microsite"
class EnrollmentPropertiesPerCourse(models.Model):
course_id = CourseKeyField(db_index=True, max_length=255)
data = models.TextField(
verbose_name="Enrollment Properties",
help_text="Config in JSON format",
validators=[validate_json_props]
)
class Meta(object):
db_table = "credo_enrollment_properties"
verbose_name = "enrollment properties item"
verbose_name_plural = "enrollment properties per course"
def user_must_fill_additional_profile_fields(course, user, block=None):
graded = block.graded if block else False
course_key = course.id
    if graded and course.credo_additional_profile_fields and user.is_authenticated() and\
            user.email.endswith('@credomodules.com') and CourseEnrollment.is_enrolled(user, course_key):
fields_version = additional_profile_fields_hash(course.credo_additional_profile_fields)
profiles = CredoModulesUserProfile.objects.filter(user=user, course_id=course_key)
if len(profiles) == 0 or profiles[0].fields_version != fields_version:
return True
return False
class TermPerOrg(models.Model):
org = models.CharField(max_length=255, verbose_name='Org', null=False, blank=False, db_index=True)
term = models.CharField(max_length=255, verbose_name='Term', null=False, blank=False)
start_date = models.DateField(verbose_name='Start Date', null=False, blank=False)
end_date = models.DateField(verbose_name='End Date', null=False, blank=False)
def to_dict(self):
return {
'id': self.id,
'org': self.org,
'term': self.term,
'start_date': self.start_date.strftime('%-m/%-d/%Y'),
'end_date': self.end_date.strftime('%-m/%-d/%Y')
}
def set_custom_term(course_id, user):
save_custom_term_student_property(get_custom_term(), user, course_id)
@receiver(ENROLL_STATUS_CHANGE)
def add_custom_term_student_property_on_enrollment(sender, event=None, user=None, course_id=None, **kwargs):
if event == EnrollStatusChange.enroll:
set_custom_term(course_id, user)
def deadlock_db_retry(func):
def func_wrapper(*args, **kwargs):
max_attempts = 2
current_attempt = 0
while True:
try:
return func(*args, **kwargs)
except OperationalError, e:
if current_attempt < max_attempts:
current_attempt += 1
time.sleep(3)
else:
log.error('Failed to save course usage: ' + str(e))
return
return func_wrapper
class CourseUsage(models.Model):
MODULE_TYPES = (('problem', 'problem'),
('video', 'video'),
('html', 'html'),
('course', 'course'),
('chapter', 'Section'),
('sequential', 'Subsection'),
('vertical', 'Vertical'),
('library_content', 'Library Content'))
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True, null=True, blank=True)
block_id = models.CharField(max_length=255, db_index=True, null=True)
block_type = models.CharField(max_length=32, choices=MODULE_TYPES, null=True)
usage_count = models.IntegerField(null=True)
first_usage_time = models.DateTimeField(verbose_name='First Usage Time', null=True, blank=True)
last_usage_time = models.DateTimeField(verbose_name='Last Usage Time', null=True, blank=True)
session_ids = models.TextField(null=True, blank=True)
class Meta:
unique_together = (('user', 'course_id', 'block_id'),)
@classmethod
@deadlock_db_retry
def _update_block_usage(cls, course_key, user_id, block_type, block_id, unique_user_id):
course_usage = CourseUsage.objects.get(
course_id=course_key,
user_id=user_id,
block_type=block_type,
block_id=block_id
)
if unique_user_id not in course_usage.session_ids:
with transaction.atomic():
CourseUsage.objects.filter(course_id=course_key, user_id=user_id,
block_id=block_id, block_type=block_type) \
.update(last_usage_time=usage_dt_now(), usage_count=F('usage_count') + 1,
session_ids=Concat('session_ids', Value('|'), Value(unique_user_id)))
@classmethod
@deadlock_db_retry
def _add_block_usage(cls, course_key, user_id, block_type, block_id, unique_user_id):
datetime_now = usage_dt_now()
with transaction.atomic():
cu = CourseUsage(
course_id=course_key,
user_id=user_id,
usage_count=1,
block_type=block_type,
block_id=block_id,
first_usage_time=datetime_now,
last_usage_time=datetime_now,
session_ids=unique_user_id
)
cu.save()
return
@classmethod
def update_block_usage(cls, request, course_key, block_id):
unique_user_id = get_unique_user_id(request)
if unique_user_id and hasattr(request, 'user') and request.user.is_authenticated():
if not isinstance(course_key, CourseKey):
course_key = CourseKey.from_string(course_key)
if not isinstance(block_id, UsageKey):
block_id = UsageKey.from_string(block_id)
block_type = block_id.block_type
block_id = str(block_id)
try:
cls._update_block_usage(course_key, request.user.id,
block_type, block_id, unique_user_id)
except CourseUsage.DoesNotExist:
try:
cls._add_block_usage(course_key, request.user.id, block_type, block_id, unique_user_id)
return
except IntegrityError:
#cls._update_block_usage(course_key, request.user.id,
# block_type, block_id, unique_user_id)
return
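# Usage sketch for the classmethod above (the course and block ids are
# hypothetical but follow the opaque-key formats parsed by from_string):
#
#   CourseUsage.update_block_usage(
#       request,
#       'course-v1:OrgX+C101+2019',
#       'block-v1:OrgX+C101+2019+type@problem+block@abc123')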
class OrganizationType(models.Model):
title = models.CharField(max_length=255, verbose_name='Title', unique=True)
constructor_lti_link = models.BooleanField(default=True, verbose_name='Display LTI link in Constructor')
constructor_embed_code = models.BooleanField(default=True, verbose_name='Display embed code field in Constructor')
constructor_direct_link = models.BooleanField(default=True, verbose_name='Display direct link in Constructor')
insights_learning_outcomes = models.BooleanField(default=True, verbose_name='Display LO report in Credo Insights')
insights_assessments = models.BooleanField(default=True, verbose_name='Display Assessment report in Credo Insights')
insights_enrollment = models.BooleanField(default=True, verbose_name='Display Enrollment report in Credo Insights')
insights_engagement = models.BooleanField(default=True, verbose_name='Display Engagement report in Credo Insights')
instructor_dashboard_credo_insights = models.BooleanField(default=True, verbose_name='Show Credo Insights link'
' in the Instructor Dashboard')
enable_new_carousel_view = models.BooleanField(default=False, verbose_name='Enable new carousel view'
' (horizontal nav bar)')
enable_page_level_engagement = models.BooleanField(default=False, verbose_name='Enable Page Level for Engagement '
'Statistic in Insights')
enable_extended_progress_page = models.BooleanField(default=False, verbose_name='Enable Extended Progress Page')
available_roles = models.ManyToManyField('CustomUserRole', blank=True)
class Meta:
ordering = ['title']
def __unicode__(self):
return self.title
@classmethod
def get_all_constructor_fields(cls):
return ['lti_link', 'embed_code', 'direct_link']
def get_constructor_fields(self):
data = []
if self.constructor_lti_link:
data.append('lti_link')
if self.constructor_embed_code:
data.append('embed_code')
if self.constructor_direct_link:
data.append('direct_link')
return data
@classmethod
def get_all_insights_reports(cls):
return ['learning_outcomes', 'assessments', 'enrollment', 'engagement']
def get_insights_reports(self):
data = []
if self.insights_learning_outcomes:
data.append('learning_outcomes')
if self.insights_assessments:
data.append('assessments')
if self.insights_enrollment:
data.append('enrollment')
if self.insights_engagement:
data.append('engagement')
return data
class Organization(models.Model):
org = models.CharField(max_length=255, verbose_name='Org', unique=True)
default_frame_domain = models.CharField(max_length=255, verbose_name='Domain for LTI/Iframe/etc',
help_text="Default value is https://frame.credocourseware.com "
"in case of empty field",
null=True, blank=True,
validators=[URLValidator()])
org_type = models.ForeignKey(OrganizationType, on_delete=models.SET_NULL,
related_name='org_type',
null=True, blank=True, verbose_name='Org Type')
def save(self, *args, **kwargs):
if self.default_frame_domain:
o = urlparse(self.default_frame_domain)
self.default_frame_domain = o.scheme + '://' + o.netloc
super(Organization, self).save(*args, **kwargs)
def get_constructor_fields(self):
if self.org_type:
return self.org_type.get_constructor_fields()
else:
return OrganizationType.get_all_constructor_fields()
def get_insights_reports(self):
if self.org_type:
return self.org_type.get_insights_reports()
else:
return OrganizationType.get_all_insights_reports()
def get_page_level_engagement(self):
if self.org_type:
return self.org_type.enable_page_level_engagement
else:
return False
def to_dict(self):
return {
'org': self.org,
'default_frame_domain': self.default_frame_domain,
'constructor_fields': self.get_constructor_fields(),
'insights_reports': self.get_insights_reports(),
'page_level_engagement': self.get_page_level_engagement(),
}
@property
def is_carousel_view(self):
if self.org_type is not None:
return self.org_type.enable_new_carousel_view
else:
return False
class CourseExcludeInsights(models.Model):
course_id = CourseKeyField(max_length=255, db_index=True, null=True, blank=True)
class Meta(object):
db_table = "credo_course_exclude_insights"
verbose_name = "course"
verbose_name_plural = "exclude insights"
class SendScores(models.Model):
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True)
block_id = models.CharField(max_length=255, db_index=True)
last_send_time = models.DateTimeField(null=True, blank=True)
class Meta(object):
db_table = "credo_send_scores"
unique_together = (('user', 'course_id', 'block_id'),)
class SendScoresMailing(models.Model):
email_scores = models.ForeignKey(SendScores)
data = models.TextField(blank=True)
last_send_time = models.DateTimeField(null=True, blank=True)
class Meta(object):
db_table = "credo_send_scores_mailing"
class CopySectionTask(TimeStampedModel, models.Model):
NOT_STARTED = 'not_started'
STARTED = 'started'
FINISHED = 'finished'
ERROR = 'error'
STATUSES = (
(NOT_STARTED, 'Not Started'),
(STARTED, 'Started'),
(FINISHED, 'Finished'),
(ERROR, 'Error'),
)
task_id = models.CharField(max_length=255, db_index=True)
block_id = models.CharField(max_length=255)
source_course_id = CourseKeyField(max_length=255, db_index=True)
dst_course_id = CourseKeyField(max_length=255)
status = models.CharField(
max_length=255,
choices=STATUSES,
default=NOT_STARTED,
)
def set_started(self):
self.status = self.STARTED
def set_finished(self):
self.status = self.FINISHED
def set_error(self):
self.status = self.ERROR
def is_finished(self):
return self.status == self.FINISHED
class Meta(object):
db_table = "copy_section_task"
unique_together = (('task_id', 'block_id', 'dst_course_id'),)
class CustomUserRole(models.Model):
title = models.CharField(max_length=255, verbose_name='Title', unique=True)
alias = models.SlugField(max_length=255, verbose_name='Slug', unique=True)
course_outline_create_new_section = models.BooleanField(default=True,
verbose_name='Course Outline: Can create new Section')
course_outline_create_new_subsection = models.BooleanField(default=True,
verbose_name='Course Outline: Can create new Subsection')
course_outline_duplicate_section = models.BooleanField(default=True,
verbose_name='Course Outline: Can duplicate Section')
course_outline_duplicate_subsection = models.BooleanField(default=True,
verbose_name='Course Outline: Can duplicate Subsection')
course_outline_copy_to_other_course = models.BooleanField(default=True,
verbose_name='Course Outline: '
'Can copy Section to other course')
top_menu_tools = models.BooleanField(default=True, verbose_name='Top Menu: Tools Dropdown menu')
unit_add_advanced_component = models.BooleanField(default=True,
verbose_name='Unit: Can add advanced components to a unit')
unit_add_discussion_component = models.BooleanField(default=True,
verbose_name='Unit: Can add discussion components to a unit')
view_tags = models.BooleanField(default=True, verbose_name='Unit: Can view tags')
# rerun_course = models.BooleanField(default=True, verbose_name='Studio Home Page: Can re-run a course')
# create_new_course = models.BooleanField(default=True, verbose_name='Studio Home Page: Can create new course')
# view_archived_courses = models.BooleanField(default=True,
# verbose_name='Studio Home Page: Can view archived courses')
# create_new_library = models.BooleanField(default=True, verbose_name='Studio Home Page: Can create new library')
# view_libraries = models.BooleanField(default=True, verbose_name='Studio Home Page: Can view libraries')
class Meta:
ordering = ['title']
verbose_name = "custom user role"
verbose_name_plural = "custom user roles"
def __unicode__(self):
return self.title
def to_dict(self):
return {
'course_outline_create_new_section': self.course_outline_create_new_section,
'course_outline_create_new_subsection': self.course_outline_create_new_subsection,
'course_outline_duplicate_section': self.course_outline_duplicate_section,
'course_outline_duplicate_subsection': self.course_outline_duplicate_subsection,
'course_outline_copy_to_other_course': self.course_outline_copy_to_other_course,
'top_menu_tools': self.top_menu_tools,
'unit_add_advanced_component': self.unit_add_advanced_component,
'unit_add_discussion_component': self.unit_add_discussion_component,
'view_tags': self.view_tags
}
class CourseStaffExtended(models.Model):
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True)
role = models.ForeignKey(CustomUserRole)
class Meta(object):
unique_together = (('user', 'course_id'),)
verbose_name = "user role"
verbose_name_plural = "extended user roles"
UNIQUE_USER_ID_COOKIE = 'credo-course-usage-id'
def get_unique_user_id(request):
uid = request.COOKIES.get(UNIQUE_USER_ID_COOKIE, None)
if uid:
return unicode(uid)
return None
def generate_new_user_id_cookie(request, user_id):
request._update_unique_user_id = True
request.COOKIES[UNIQUE_USER_ID_COOKIE] = unicode(uuid.uuid4()) + '_' + user_id
def update_unique_user_id_cookie(request):
user_id = 'anon'
if hasattr(request, 'user') and request.user.is_authenticated():
user_id = str(request.user.id)
course_usage_cookie_id = get_unique_user_id(request)
if not course_usage_cookie_id:
generate_new_user_id_cookie(request, user_id)
else:
cookie_arr = course_usage_cookie_id.split('_')
if len(cookie_arr) < 2 or cookie_arr[1] != user_id:
generate_new_user_id_cookie(request, user_id)
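# Sketch of the cookie round trip, assuming a Django request object: the
# cookie value is "<uuid4>_<user id>" (or "<uuid4>_anon" when anonymous), and
# a fresh value is generated whenever the user-id suffix stops matching.
#
#   update_unique_user_id_cookie(request)
#   uid = get_unique_user_id(request)  # e.g. u'1b9f...-..._42'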
def usage_dt_now():
"""
We can't use timezone.now() because we already use America/New_York timezone for usage values
so we just replace tzinfo in the datetime object
:return: datetime
"""
return datetime.datetime.now().replace(tzinfo=utc)
def get_org_roles_types(org):
roles = []
try:
org = Organization.objects.get(org=org)
if org.org_type is not None:
roles = [{
'title': r.title,
'id': r.id
} for r in org.org_type.available_roles.order_by('title').all()]
except Organization.DoesNotExist:
pass
roles.append({'id': 'staff', 'title': 'Staff'})
roles.append({'id': 'instructor', 'title': 'Admin'})
return sorted(roles, key=lambda k: k['title'])
def get_custom_user_role(course_id, user, check_enrollment=True):
if check_enrollment:
try:
enrollment = CourseAccessRole.objects.get(user=user, course_id=course_id)
if enrollment.role != 'staff':
return None
except CourseAccessRole.DoesNotExist:
return None
try:
staff_extended = CourseStaffExtended.objects.get(user=user, course_id=course_id)
return staff_extended.role
except CourseStaffExtended.DoesNotExist:
return None
def get_all_course_staff_extended_roles(course_id):
staff_users = CourseStaffExtended.objects.filter(course_id=course_id)
return {s.user_id: s.role_id for s in staff_users}
def get_extended_role_default_permissions():
return CustomUserRole().to_dict()
| agpl-3.0 | -8,030,811,223,852,001,000 | 39.026101 | 120 | 0.616115 | false |
meduz/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause | -155,444,959,354,534,600 | 24.714286 | 77 | 0.707407 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/boto-2.19.0-py2.7.egg/boto/s3/cors.py | 240 | 8941 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class CORSRule(object):
"""
CORS rule for a bucket.
:ivar id: A unique identifier for the rule. The ID value can be
up to 255 characters long. The IDs help you find a rule in
the configuration.
:ivar allowed_methods: An HTTP method that you want to allow the
origin to execute. Each CORSRule must identify at least one
origin and one method. Valid values are:
GET|PUT|HEAD|POST|DELETE
:ivar allowed_origin: An origin that you want to allow cross-domain
requests from. This can contain at most one * wild character.
Each CORSRule must identify at least one origin and one method.
The origin value can include at most one '*' wild character.
For example, "http://*.example.com". You can also specify
only * as the origin value allowing all origins cross-domain access.
:ivar allowed_header: Specifies which headers are allowed in a
pre-flight OPTIONS request via the
Access-Control-Request-Headers header. Each header name
specified in the Access-Control-Request-Headers header must
have a corresponding entry in the rule. Amazon S3 will send
only the allowed headers in a response that were requested.
This can contain at most one * wild character.
:ivar max_age_seconds: The time in seconds that your browser is to
cache the preflight response for the specified resource.
:ivar expose_header: One or more headers in the response that you
want customers to be able to access from their applications
(for example, from a JavaScript XMLHttpRequest object). You
add one ExposeHeader element in the rule for each header.
"""
def __init__(self, allowed_method=None, allowed_origin=None,
id=None, allowed_header=None, max_age_seconds=None,
expose_header=None):
if allowed_method is None:
allowed_method = []
self.allowed_method = allowed_method
if allowed_origin is None:
allowed_origin = []
self.allowed_origin = allowed_origin
self.id = id
if allowed_header is None:
allowed_header = []
self.allowed_header = allowed_header
self.max_age_seconds = max_age_seconds
if expose_header is None:
expose_header = []
self.expose_header = expose_header
def __repr__(self):
return '<Rule: %s>' % self.id
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'ID':
self.id = value
elif name == 'AllowedMethod':
self.allowed_method.append(value)
elif name == 'AllowedOrigin':
self.allowed_origin.append(value)
elif name == 'AllowedHeader':
self.allowed_header.append(value)
elif name == 'MaxAgeSeconds':
self.max_age_seconds = int(value)
elif name == 'ExposeHeader':
self.expose_header.append(value)
else:
setattr(self, name, value)
def to_xml(self):
s = '<CORSRule>'
for allowed_method in self.allowed_method:
s += '<AllowedMethod>%s</AllowedMethod>' % allowed_method
for allowed_origin in self.allowed_origin:
s += '<AllowedOrigin>%s</AllowedOrigin>' % allowed_origin
for allowed_header in self.allowed_header:
s += '<AllowedHeader>%s</AllowedHeader>' % allowed_header
for expose_header in self.expose_header:
s += '<ExposeHeader>%s</ExposeHeader>' % expose_header
if self.max_age_seconds:
s += '<MaxAgeSeconds>%d</MaxAgeSeconds>' % self.max_age_seconds
if self.id:
s += '<ID>%s</ID>' % self.id
s += '</CORSRule>'
return s
class CORSConfiguration(list):
"""
A container for the rules associated with a CORS configuration.
"""
def startElement(self, name, attrs, connection):
if name == 'CORSRule':
rule = CORSRule()
self.append(rule)
return rule
return None
def endElement(self, name, value, connection):
setattr(self, name, value)
def to_xml(self):
"""
Returns a string containing the XML version of the Lifecycle
configuration as defined by S3.
"""
s = '<CORSConfiguration>'
for rule in self:
s += rule.to_xml()
s += '</CORSConfiguration>'
return s
def add_rule(self, allowed_method, allowed_origin,
id=None, allowed_header=None, max_age_seconds=None,
expose_header=None):
"""
Add a rule to this CORS configuration. This only adds
the rule to the local copy. To install the new rule(s) on
the bucket, you need to pass this CORS config object
to the set_cors method of the Bucket object.
:type allowed_methods: list of str
:param allowed_methods: An HTTP method that you want to allow the
origin to execute. Each CORSRule must identify at least one
origin and one method. Valid values are:
GET|PUT|HEAD|POST|DELETE
:type allowed_origin: list of str
:param allowed_origin: An origin that you want to allow cross-domain
requests from. This can contain at most one * wild character.
Each CORSRule must identify at least one origin and one method.
The origin value can include at most one '*' wild character.
For example, "http://*.example.com". You can also specify
only * as the origin value allowing all origins
cross-domain access.
:type id: str
:param id: A unique identifier for the rule. The ID value can be
up to 255 characters long. The IDs help you find a rule in
the configuration.
:type allowed_header: list of str
:param allowed_header: Specifies which headers are allowed in a
pre-flight OPTIONS request via the
Access-Control-Request-Headers header. Each header name
specified in the Access-Control-Request-Headers header must
have a corresponding entry in the rule. Amazon S3 will send
only the allowed headers in a response that were requested.
This can contain at most one * wild character.
:type max_age_seconds: int
:param max_age_seconds: The time in seconds that your browser is to
cache the preflight response for the specified resource.
:type expose_header: list of str
:param expose_header: One or more headers in the response that you
want customers to be able to access from their applications
(for example, from a JavaScript XMLHttpRequest object). You
add one ExposeHeader element in the rule for each header.
"""
        if not isinstance(allowed_method, (list, tuple)):
            allowed_method = [allowed_method]
        if not isinstance(allowed_origin, (list, tuple)):
            if allowed_origin is None:
                allowed_origin = []
            else:
                allowed_origin = [allowed_origin]
        if not isinstance(allowed_header, (list, tuple)):
            if allowed_header is None:
                allowed_header = []
            else:
                allowed_header = [allowed_header]
        if not isinstance(expose_header, (list, tuple)):
            if expose_header is None:
                expose_header = []
            else:
                expose_header = [expose_header]
rule = CORSRule(allowed_method, allowed_origin, id, allowed_header,
max_age_seconds, expose_header)
self.append(rule)
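# Hedged usage sketch (the bucket and origins are hypothetical; set_cors is
# the Bucket method referred to in the add_rule docstring above):
#
#   cors = CORSConfiguration()
#   cors.add_rule(['GET', 'HEAD'], 'http://*.example.com',
#                 allowed_header='*', max_age_seconds=3000)
#   bucket.set_cors(cors)   # install the rules on the bucket
#   print cors.to_xml()     # inspect the generated XML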
| apache-2.0 | 6,257,436,382,329,624,000 | 41.57619 | 77 | 0.635164 | false |
chhao91/QGIS | python/ext-libs/pygments/styles/perldoc.py | 364 | 2175 | # -*- coding: utf-8 -*-
"""
pygments.styles.perldoc
~~~~~~~~~~~~~~~~~~~~~~~
Style similar to the style used in the `perldoc`_ code blocks.
.. _perldoc: http://perldoc.perl.org/
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class PerldocStyle(Style):
"""
Style similar to the style used in the perldoc code blocks.
"""
background_color = '#eeeedd'
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: '#228B22',
Comment.Preproc: '#1e889b',
Comment.Special: '#8B008B bold',
String: '#CD5555',
String.Heredoc: '#1c7e71 italic',
String.Other: '#cb6c20',
String.Regex: '#1c7e71',
Number: '#B452CD',
Operator.Word: '#8B008B',
Keyword: '#8B008B bold',
Keyword.Type: '#a7a7a7',
Name.Class: '#008b45 bold',
Name.Exception: '#008b45 bold',
Name.Function: '#008b45',
Name.Namespace: '#008b45 underline',
Name.Variable: '#00688B',
Name.Constant: '#00688B',
Name.Decorator: '#707a7c',
Name.Tag: '#8B008B bold',
Name.Attribute: '#658b00',
Name.Builtin: '#658b00',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
| gpl-2.0 | 3,377,716,182,243,876,000 | 30.521739 | 70 | 0.483218 | false |
spirrello/spirrello-pynet-work | applied_python/lib/python2.7/site-packages/setuptools/msvc9_support.py | 429 | 2187 | try:
import distutils.msvc9compiler
except ImportError:
pass
unpatched = dict()
def patch_for_specialized_compiler():
"""
Patch functions in distutils.msvc9compiler to use the standalone compiler
build for Python (Windows only). Fall back to original behavior when the
standalone compiler is not available.
"""
if 'distutils' not in globals():
# The module isn't available to be patched
return
if unpatched:
# Already patched
return
unpatched.update(vars(distutils.msvc9compiler))
distutils.msvc9compiler.find_vcvarsall = find_vcvarsall
distutils.msvc9compiler.query_vcvarsall = query_vcvarsall
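# Minimal usage sketch: call once before building extensions so distutils can
# fall back to the standalone "Microsoft Visual C++ Compiler for Python"
# install when the full Visual Studio toolchain is absent (Windows only; a
# no-op when distutils.msvc9compiler is unavailable or already patched).
#
#   patch_for_specialized_compiler()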
def find_vcvarsall(version):
Reg = distutils.msvc9compiler.Reg
VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
key = VC_BASE % ('', version)
try:
# Per-user installs register the compiler path here
productdir = Reg.get_value(key, "installdir")
except KeyError:
try:
# All-user installs on a 64-bit system register here
key = VC_BASE % ('Wow6432Node\\', version)
productdir = Reg.get_value(key, "installdir")
except KeyError:
productdir = None
if productdir:
import os
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
return unpatched['find_vcvarsall'](version)
def query_vcvarsall(version, *args, **kwargs):
try:
return unpatched['query_vcvarsall'](version, *args, **kwargs)
except distutils.errors.DistutilsPlatformError as exc:
if exc and "vcvarsall.bat" in exc.args[0]:
message = 'Microsoft Visual C++ %0.1f is required (%s).' % (version, exc.args[0])
if int(version) == 9:
# This redirection link is maintained by Microsoft.
# Contact vspython@microsoft.com if it needs updating.
raise distutils.errors.DistutilsPlatformError(
message + ' Get it from http://aka.ms/vcpython27'
)
raise distutils.errors.DistutilsPlatformError(message)
raise
| gpl-3.0 | -6,686,666,396,616,547,000 | 33.714286 | 93 | 0.638317 | false |
40223151/2014c2g9 | exts/w2/static/Brython2.0.0-20140209-164925/Lib/shutil.py | 720 | 39101 | """Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which"]
# disk_usage is added later, if available on the platform
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registeries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
elif hasattr(os, 'chmod'):
stat_func, chmod_func = os.stat, os.chmod
else:
return
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
try:
names = os.listxattr(src, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.ENOTSUP, errno.ENODATA):
raise
return
for name in names:
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
only if both `src` and `dst` are symlinks.
"""
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
# if we got a NotImplementedError, it's because
# * follow_symlinks=False,
# * lchown() is unavailable, and
# * either
# * fchownat() is unavailable or
# * fchownat() doesn't implement AT_SYMLINK_NOFOLLOW.
# (it returned ENOSUP.)
# therefore we're out of options--we simply cannot chown the
# symlink. give up, suppress the error.
# (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
_copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
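# Usage sketch, as suggested by the docstring (paths are hypothetical): skip
# compiled files and anything starting with "tmp" while copying a tree.
#
#   copytree('src', 'dst', ignore=ignore_patterns('*.pyc', 'tmp*'))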
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
# otherwise let the copy occurs. copy2 will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
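# Sketch of the copy_function hook described in the docstring (paths are
# hypothetical): pass copy() instead of the default copy2() to copy data and
# mode bits but not timestamps.
#
#   copytree('src', 'dst', copy_function=copy)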
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except os.error:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(os.rmdir, fullname, sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(name, dir_fd=topfd)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
# While the unsafe rmtree works fine on bytes, the fd based does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
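# A small sketch of the onerror hook (the handler name and path are
# hypothetical): clear the read-only bit and retry, a common workaround on
# Windows.
#
#   def _retry_writable(func, path, exc_info):
#       os.chmod(path, stat.S_IWRITE)
#       func(path)
#
#   rmtree('build', onerror=_retry_writable)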
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
return real_dst
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
        if not isinstance(element, (tuple, list)) or len(element) != 2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
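# Registration sketch (the _make_xztar callable is hypothetical): the function
# is called as function(base_name, base_dir, **extra_args), plus the
# owner/group/dry_run/logger keywords forwarded by make_archive.
#
#   register_archive_format('xztar', _make_xztar, [('compress', 'xz')],
#                           "xz'ed tar-file")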
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
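# Usage sketch (paths are hypothetical): create /tmp/backup.tar.gz containing
# everything under /var/www, with archive members relative to root_dir.
#
#   archive = make_archive('/tmp/backup', 'gztar', root_dir='/var/www')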
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
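# Editor's note: usage sketch. With no explicit format, unpack_archive()
# picks the handler from the filename extension (".tar.gz" -> "gztar");
# the paths below are illustrative.
#
#   unpack_archive('dist/pkg.tar.gz', extract_dir='build')
#   unpack_archive('bundle.zip', format='zip')  # force a specific handler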
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
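    # Editor's note: usage sketch -- the result is a named tuple, so both
    # attribute access and tuple unpacking work; all values are in bytes.
    #
    #   total, used, free = disk_usage('/')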
elif os.name == 'nt':
import nt
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned values is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
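# Editor's note: usage sketch for chown(); the path and account names
# below are illustrative assumptions.
#
#   chown('/srv/app', user='www-data', group='www-data')
#   chown('/srv/app', user=0)  # numeric ids are passed through unchanged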
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
        except (AttributeError, ValueError, OSError):
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
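# Editor's note: usage sketch. The return value is an os.terminal_size,
# which unpacks like a tuple; the fallback is only used when the real
# size cannot be queried.
#
#   columns, lines = get_terminal_size()
#   size = get_terminal_size(fallback=(100, 40))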
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
        if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
        if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
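# Editor's note: usage sketch; "python" is just an example command name.
#
#   interpreter = which('python')   # absolute path, or None if not found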
| gpl-2.0 | 8,871,492,437,878,476,000 | 33.633304 | 90 | 0.60377 | false |
RaoUmer/django | django/contrib/auth/forms.py | 4 | 13427 | from __future__ import unicode_literals
from django import forms
from django.forms.util import flatatt
from django.template import loader
from django.utils.datastructures import SortedDict
from django.utils.html import format_html, format_html_join
from django.utils.http import int_to_base36
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
UNMASKED_DIGITS_TO_SHOW = 6
mask_password = lambda p: "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW], "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
class ReadOnlyPasswordHashWidget(forms.Widget):
def render(self, name, value, attrs):
encoded = value
final_attrs = self.build_attrs(attrs)
if encoded == '' or encoded == UNUSABLE_PASSWORD:
summary = mark_safe("<strong>%s</strong>" % ugettext("No password set."))
else:
try:
hasher = identify_hasher(encoded)
except ValueError:
summary = mark_safe("<strong>%s</strong>" % ugettext(
"Invalid password format or unknown hashing algorithm."))
else:
summary = format_html_join('',
"<strong>{0}</strong>: {1} ",
((ugettext(key), value)
for key, value in hasher.safe_summary(encoded).items())
)
return format_html("<div{0}>{1}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
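# Editor's note (sketch, not part of Django): the usual view-level flow for
# the form above; `request` is assumed to be a POST request carrying
# username/password1/password2.
#
#   form = UserCreationForm(request.POST)
#   if form.is_valid():
#       user = form.save()  # password is hashed via set_password()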
class UserChangeForm(forms.ModelForm):
username = forms.RegexField(
label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
def clean_password(self):
return self.initial["password"]
class Meta:
model = User
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(max_length=30)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct username and password. "
"Note that both fields are case-sensitive."),
'no_cookies': _("Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
username_field = UserModel._meta.get_field(getattr(UserModel, 'USERNAME_FIELD', 'username'))
self.fields['username'].label = capfirst(username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'])
elif not self.user_cache.is_active:
raise forms.ValidationError(self.error_messages['inactive'])
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
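# Editor's note (sketch, not part of Django): validating credentials via
# the form; the credential values are illustrative and `login` would come
# from django.contrib.auth.
#
#   form = AuthenticationForm(request, data={'username': 'alice',
#                                            'password': 'secret'})
#   if form.is_valid():
#       login(request, form.get_user())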
class PasswordResetForm(forms.Form):
error_messages = {
'unknown': _("That email address doesn't have an associated "
"user account. Are you sure you've registered?"),
'unusable': _("The user account associated with this email "
"address cannot reset the password."),
}
email = forms.EmailField(label=_("Email"), max_length=75)
def clean_email(self):
"""
Validates that an active user exists with the given email address.
"""
UserModel = get_user_model()
email = self.cleaned_data["email"]
self.users_cache = UserModel.objects.filter(email__iexact=email,
is_active=True)
if not len(self.users_cache):
raise forms.ValidationError(self.error_messages['unknown'])
if any((user.password == UNUSABLE_PASSWORD)
for user in self.users_cache):
raise forms.ValidationError(self.error_messages['unusable'])
return email
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
from django.core.mail import send_mail
for user in self.users_cache:
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
c = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': int_to_base36(user.id),
'user': user,
'token': token_generator.make_token(user),
'protocol': use_https and 'https' or 'http',
}
subject = loader.render_to_string(subject_template_name, c)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
email = loader.render_to_string(email_template_name, c)
send_mail(subject, email, from_email, [user.email])
class SetPasswordForm(forms.Form):
"""
    A form that lets a user set his/her password without entering the
    old password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("New password confirmation"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
A form that lets a user change his/her password by entering
their old password.
"""
error_messages = dict(SetPasswordForm.error_messages, **{
'password_incorrect': _("Your old password was entered incorrectly. "
"Please enter it again."),
})
old_password = forms.CharField(label=_("Old password"),
widget=forms.PasswordInput)
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'])
return old_password
PasswordChangeForm.base_fields = SortedDict([
(k, PasswordChangeForm.base_fields[k])
for k in ['old_password', 'new_password1', 'new_password2']
])
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password (again)"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
"""
Saves the new password.
"""
self.user.set_password(self.cleaned_data["password1"])
if commit:
self.user.save()
return self.user
| bsd-3-clause | 5,780,367,001,028,654,000 | 38.031977 | 112 | 0.595516 | false |
sauloal/PiCastPy | sqlalchemy/util/topological.py | 18 | 2656 | # util/topological.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Topological sorting algorithms."""
from ..exc import CircularDependencyError
from .. import util
__all__ = ['sort', 'sort_as_subsets', 'find_cycles']
def sort_as_subsets(tuples, allitems):
edges = util.defaultdict(set)
for parent, child in tuples:
edges[child].add(parent)
todo = set(allitems)
while todo:
output = set()
for node in list(todo):
if not todo.intersection(edges[node]):
output.add(node)
if not output:
raise CircularDependencyError(
"Circular dependency detected.",
find_cycles(tuples, allitems),
_gen_edges(edges)
)
todo.difference_update(output)
yield output
def sort(tuples, allitems):
"""sort the given list of items by dependency.
'tuples' is a list of tuples representing a partial ordering.
"""
for set_ in sort_as_subsets(tuples, allitems):
for s in set_:
yield s
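# Editor's note: usage sketch. Tuples are (parent, child) pairs, so every
# parent is yielded before its children.
#
#   list(sort([('a', 'b'), ('b', 'c')], ['a', 'b', 'c']))
#   # -> ['a', 'b', 'c']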
def find_cycles(tuples, allitems):
# adapted from:
# http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
edges = util.defaultdict(set)
for parent, child in tuples:
edges[parent].add(child)
nodes_to_test = set(edges)
output = set()
# we'd like to find all nodes that are
# involved in cycles, so we do the full
# pass through the whole thing for each
# node in the original list.
# we can go just through parent edge nodes.
# if a node is only a child and never a parent,
# by definition it can't be part of a cycle. same
# if it's not in the edges at all.
for node in nodes_to_test:
stack = [node]
todo = nodes_to_test.difference(stack)
while stack:
top = stack[-1]
for node in edges[top]:
if node in stack:
cyc = stack[stack.index(node):]
todo.difference_update(cyc)
output.update(cyc)
if node in todo:
stack.append(node)
todo.remove(node)
break
else:
node = stack.pop()
return output
def _gen_edges(edges):
return set([
(right, left)
for left in edges
for right in edges[left]
])
| mit | -7,620,291,973,427,611,000 | 26.666667 | 85 | 0.569654 | false |
sszlm/MissionPlanner | Lib/unittest/runner.py | 53 | 6698 | """Running tests"""
import sys
import time
from . import result
from .signals import registerResult
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
    def __init__(self, stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
"""A test result class that can print formatted text results to a stream.
Used by TextTestRunner.
"""
separator1 = '=' * 70
separator2 = '-' * 70
def __init__(self, stream, descriptions, verbosity):
super(TextTestResult, self).__init__()
self.stream = stream
self.showAll = verbosity > 1
self.dots = verbosity == 1
self.descriptions = descriptions
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
return str(test)
def startTest(self, test):
super(TextTestResult, self).startTest(test)
if self.showAll:
self.stream.write(self.getDescription(test))
self.stream.write(" ... ")
self.stream.flush()
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def printErrors(self):
if self.dots or self.showAll:
self.stream.writeln()
self.printErrorList('ERROR', self.errors)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors):
for test, err in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour,self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln("%s" % err)
class TextTestRunner(object):
"""A test runner class that displays results in textual form.
It prints out the names of tests as they are run, errors as they
occur, and a summary of the results at the end of the test run.
"""
resultclass = TextTestResult
def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
failfast=False, buffer=False, resultclass=None):
self.stream = _WritelnDecorator(stream)
self.descriptions = descriptions
self.verbosity = verbosity
self.failfast = failfast
self.buffer = buffer
if resultclass is not None:
self.resultclass = resultclass
def _makeResult(self):
return self.resultclass(self.stream, self.descriptions, self.verbosity)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
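# Editor's note: usage sketch for this runner; the dotted module name is
# illustrative.
#
#   import unittest
#   suite = unittest.defaultTestLoader.loadTestsFromName('mypkg.tests')
#   unittest.TextTestRunner(verbosity=2).run(suite)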
| gpl-3.0 | 4,988,601,156,578,807,000 | 32.173469 | 79 | 0.567184 | false |
actuaryzhang/spark | python/pyspark/tests/test_taskcontext.py | 2 | 9144 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import random
import stat
import sys
import tempfile
import time
import unittest
from pyspark import SparkConf, SparkContext, TaskContext, BarrierTaskContext
from pyspark.testing.utils import PySparkTestCase
class TaskContextTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
# Allow retries even though they are normally disabled in local mode
self.sc = SparkContext('local[4, 2]', class_name)
def test_stage_id(self):
"""Test the stage ids are available and incrementing as expected."""
rdd = self.sc.parallelize(range(10))
stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
# Test using the constructor directly rather than the get()
stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
self.assertEqual(stage1 + 1, stage2)
self.assertEqual(stage1 + 2, stage3)
self.assertEqual(stage2 + 1, stage3)
def test_resources(self):
"""Test the resources are empty by default."""
rdd = self.sc.parallelize(range(10))
resources1 = rdd.map(lambda x: TaskContext.get().resources()).take(1)[0]
# Test using the constructor directly rather than the get()
resources2 = rdd.map(lambda x: TaskContext().resources()).take(1)[0]
self.assertEqual(len(resources1), 0)
self.assertEqual(len(resources2), 0)
def test_partition_id(self):
"""Test the partition id."""
rdd1 = self.sc.parallelize(range(10), 1)
rdd2 = self.sc.parallelize(range(10), 2)
pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
self.assertEqual(0, pids1[0])
self.assertEqual(0, pids1[9])
self.assertEqual(0, pids2[0])
self.assertEqual(1, pids2[9])
def test_attempt_number(self):
"""Verify the attempt numbers are correctly reported."""
rdd = self.sc.parallelize(range(10))
# Verify a simple job with no failures
attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
map(lambda attempt: self.assertEqual(0, attempt), attempt_numbers)
def fail_on_first(x):
"""Fail on the first attempt so we get a positive attempt number"""
tc = TaskContext.get()
attempt_number = tc.attemptNumber()
partition_id = tc.partitionId()
attempt_id = tc.taskAttemptId()
if attempt_number == 0 and partition_id == 0:
raise Exception("Failing on first attempt")
else:
return [x, partition_id, attempt_number, attempt_id]
result = rdd.map(fail_on_first).collect()
        # The first partition should be re-submitted, while the other partitions stay at attempt 0
self.assertEqual([0, 0, 1], result[0][0:3])
self.assertEqual([9, 3, 0], result[9][0:3])
first_partition = filter(lambda x: x[1] == 0, result)
map(lambda x: self.assertEqual(1, x[2]), first_partition)
other_partitions = filter(lambda x: x[1] != 0, result)
map(lambda x: self.assertEqual(0, x[2]), other_partitions)
# The task attempt id should be different
self.assertTrue(result[0][3] != result[9][3])
def test_tc_on_driver(self):
"""Verify that getting the TaskContext on the driver returns None."""
tc = TaskContext.get()
self.assertTrue(tc is None)
def test_get_local_property(self):
"""Verify that local properties set on the driver are available in TaskContext."""
key = "testkey"
value = "testvalue"
self.sc.setLocalProperty(key, value)
try:
rdd = self.sc.parallelize(range(1), 1)
prop1 = rdd.map(lambda _: TaskContext.get().getLocalProperty(key)).collect()[0]
self.assertEqual(prop1, value)
prop2 = rdd.map(lambda _: TaskContext.get().getLocalProperty("otherkey")).collect()[0]
self.assertTrue(prop2 is None)
finally:
self.sc.setLocalProperty(key, None)
def test_barrier(self):
"""
Verify that BarrierTaskContext.barrier() performs global sync among all barrier tasks
within a stage.
"""
rdd = self.sc.parallelize(range(10), 4)
def f(iterator):
yield sum(iterator)
def context_barrier(x):
tc = BarrierTaskContext.get()
time.sleep(random.randint(1, 10))
tc.barrier()
return time.time()
times = rdd.barrier().mapPartitions(f).map(context_barrier).collect()
self.assertTrue(max(times) - min(times) < 1)
def test_barrier_infos(self):
"""
Verify that BarrierTaskContext.getTaskInfos() returns a list of all task infos in the
barrier stage.
"""
rdd = self.sc.parallelize(range(10), 4)
def f(iterator):
yield sum(iterator)
taskInfos = rdd.barrier().mapPartitions(f).map(lambda x: BarrierTaskContext.get()
.getTaskInfos()).collect()
self.assertTrue(len(taskInfos) == 4)
self.assertTrue(len(taskInfos[0]) == 4)
class TaskContextTestsWithWorkerReuse(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.worker.reuse", "true")
self.sc = SparkContext('local[2]', class_name, conf=conf)
def test_barrier_with_python_worker_reuse(self):
"""
        Regression test for SPARK-25921: verify that BarrierTaskContext.barrier()
        works with a reused python worker.
"""
# start a normal job first to start all workers and get all worker pids
worker_pids = self.sc.parallelize(range(2), 2).map(lambda x: os.getpid()).collect()
# the worker will reuse in this barrier job
rdd = self.sc.parallelize(range(10), 2)
def f(iterator):
yield sum(iterator)
def context_barrier(x):
tc = BarrierTaskContext.get()
time.sleep(random.randint(1, 10))
tc.barrier()
return (time.time(), os.getpid())
result = rdd.barrier().mapPartitions(f).map(context_barrier).collect()
times = list(map(lambda x: x[0], result))
pids = list(map(lambda x: x[1], result))
# check both barrier and worker reuse effect
self.assertTrue(max(times) - min(times) < 1)
for pid in pids:
self.assertTrue(pid in worker_pids)
def tearDown(self):
self.sc.stop()
class TaskContextTestsWithResources(unittest.TestCase):
def setUp(self):
class_name = self.__class__.__name__
self.tempFile = tempfile.NamedTemporaryFile(delete=False)
self.tempFile.write(b'echo {\\"name\\": \\"gpu\\", \\"addresses\\": [\\"0\\"]}')
self.tempFile.close()
os.chmod(self.tempFile.name, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP |
stat.S_IROTH | stat.S_IXOTH)
conf = SparkConf().set("spark.task.resource.gpu.amount", "1")
conf = conf.set("spark.executor.resource.gpu.amount", "1")
conf = conf.set("spark.executor.resource.gpu.discoveryScript", self.tempFile.name)
self.sc = SparkContext('local-cluster[2,1,1024]', class_name, conf=conf)
def test_resources(self):
"""Test the resources are available."""
rdd = self.sc.parallelize(range(10))
resources = rdd.map(lambda x: TaskContext.get().resources()).take(1)[0]
self.assertEqual(len(resources), 1)
self.assertTrue('gpu' in resources)
self.assertEqual(resources['gpu'].name, 'gpu')
self.assertEqual(resources['gpu'].addresses, ['0'])
def tearDown(self):
os.unlink(self.tempFile.name)
self.sc.stop()
if __name__ == "__main__":
import unittest
from pyspark.tests.test_taskcontext import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 | 8,256,407,110,357,641,000 | 39.821429 | 98 | 0.628062 | false |
svn2github/chromium-depot-tools | third_party/pylint/reporters/html.py | 59 | 2497 | # Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""HTML reporter"""
import sys
from cgi import escape
from logilab.common.ureports import HTMLWriter, Section, Table
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
class HTMLReporter(BaseReporter):
"""report messages and layouts in HTML"""
__implements__ = IReporter
name = 'html'
extension = 'html'
def __init__(self, output=sys.stdout):
BaseReporter.__init__(self, output)
self.msgs = []
def handle_message(self, msg):
"""manage message of different type and in the context of path"""
self.msgs += (msg.category, msg.module, msg.obj,
str(msg.line), str(msg.column), escape(msg.msg))
def set_output(self, output=None):
"""set output stream
        messages buffered for the old output are processed first"""
if self.out and self.msgs:
self._display(Section())
BaseReporter.set_output(self, output)
def _display(self, layout):
"""launch layouts display
        overridden from BaseReporter to insert the messages section
(in add_message, message is not displayed, just collected so it
can be displayed in an html table)
"""
if self.msgs:
# add stored messages to the layout
msgs = ['type', 'module', 'object', 'line', 'col_offset', 'message']
msgs += self.msgs
sect = Section('Messages')
layout.append(sect)
sect.append(Table(cols=6, children=msgs, rheaders=1))
self.msgs = []
HTMLWriter().format(layout, self.out)
def register(linter):
"""Register the reporter classes with the linter."""
linter.register_reporter(HTMLReporter)
| bsd-3-clause | -1,768,536,535,786,423,300 | 35.188406 | 80 | 0.6664 | false |
huggingface/pytorch-transformers | tests/test_pipelines_translation.py | 2 | 4301 | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pytest
from transformers import pipeline
from transformers.testing_utils import is_pipeline_test, is_torch_available, require_torch, slow
from .test_pipelines_common import MonoInputPipelineCommonMixin
if is_torch_available():
from transformers.models.mbart import MBart50TokenizerFast, MBartForConditionalGeneration
class TranslationEnToDePipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
pipeline_task = "translation_en_to_de"
small_models = ["patrickvonplaten/t5-tiny-random"] # Default model - Models tested without the @slow decorator
large_models = [None] # Models tested with the @slow decorator
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["translation_text"]
class TranslationEnToRoPipelineTests(MonoInputPipelineCommonMixin, unittest.TestCase):
pipeline_task = "translation_en_to_ro"
small_models = ["patrickvonplaten/t5-tiny-random"] # Default model - Models tested without the @slow decorator
large_models = [None] # Models tested with the @slow decorator
invalid_inputs = [4, "<mask>"]
mandatory_keys = ["translation_text"]
@is_pipeline_test
class TranslationNewFormatPipelineTests(unittest.TestCase):
@require_torch
@slow
def test_default_translations(self):
# We don't provide a default for this pair
with self.assertRaises(ValueError):
pipeline(task="translation_cn_to_ar")
# but we do for this one
translator = pipeline(task="translation_en_to_de")
self.assertEquals(translator.src_lang, "en")
self.assertEquals(translator.tgt_lang, "de")
@require_torch
@slow
def test_multilingual_translation(self):
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
translator = pipeline(task="translation", model=model, tokenizer=tokenizer)
# Missing src_lang, tgt_lang
with self.assertRaises(ValueError):
translator("This is a test")
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR")
self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])
outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN")
self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}])
# src_lang, tgt_lang can be defined at pipeline call time
translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR")
outputs = translator("This is a test")
self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}])
@require_torch
def test_translation_on_odd_language(self):
model = "patrickvonplaten/t5-tiny-random"
translator = pipeline(task="translation_cn_to_ar", model=model)
self.assertEquals(translator.src_lang, "cn")
self.assertEquals(translator.tgt_lang, "ar")
@require_torch
def test_translation_default_language_selection(self):
model = "patrickvonplaten/t5-tiny-random"
with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"):
translator = pipeline(task="translation", model=model)
self.assertEqual(translator.task, "translation_en_to_de")
self.assertEquals(translator.src_lang, "en")
self.assertEquals(translator.tgt_lang, "de")
@require_torch
def test_translation_with_no_language_no_model_fails(self):
with self.assertRaises(ValueError):
pipeline(task="translation")
| apache-2.0 | -9,124,288,608,941,156,000 | 41.57 | 119 | 0.706366 | false |
jaruba/chromium.src | tools/telemetry/telemetry/timeline/inspector_importer_unittest.py | 44 | 5399 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.timeline import inspector_importer
from telemetry.timeline import model
from telemetry.timeline import trace_data
_BACKGROUND_MESSAGE = {
'data': {},
'type': 'BeginFrame',
'thread': '2',
'startTime': 1352783525921.824}
_SAMPLE_MESSAGE = {
'children': [
{'data': {},
'startTime': 1352783525921.823,
'type': 'BeginFrame',
'usedHeapSize': 1870736},
{'children': [],
'data': {'height': 723,
'width': 1272,
'x': 0,
'y': 0},
'endTime': 1352783525921.8992,
'frameId': '10.2',
'startTime': 1352783525921.8281,
'type': 'Layout',
'usedHeapSize': 1870736},
{'children': [
{'children': [],
'data': {'imageType': 'PNG'},
'endTime': 1352783525927.7939,
'startTime': 1352783525922.4241,
'type': 'DecodeImage',
'usedHeapSize': 1870736}
],
'data': {'height': 432,
'width': 1272,
'x': 0,
'y': 8},
'endTime': 1352783525927.9822,
'frameId': '10.2',
'startTime': 1352783525921.9292,
'type': 'Paint',
'usedHeapSize': 1870736}
],
'data': {},
'endTime': 1352783525928.041,
'startTime': 1352783525921.8049,
'type': 'Program'}
class InspectorEventParsingTest(unittest.TestCase):
def testParsingWithSampleData(self):
root_event = (inspector_importer.InspectorTimelineImporter
.RawEventToTimelineEvent(_SAMPLE_MESSAGE))
self.assertTrue(root_event)
decode_image_event = [
child for child in root_event.IterEventsInThisContainerRecrusively()
if child.name == 'DecodeImage'][0]
self.assertEquals(decode_image_event.args['data']['imageType'], 'PNG')
self.assertTrue(decode_image_event.duration > 0)
def testParsingWithSimpleData(self):
raw_event = {'type': 'Foo',
'startTime': 1,
'endTime': 3,
'children': []}
event = (inspector_importer.InspectorTimelineImporter
.RawEventToTimelineEvent(raw_event))
self.assertEquals('Foo', event.name)
self.assertEquals(1, event.start)
self.assertEquals(3, event.end)
self.assertEquals(2, event.duration)
self.assertEquals([], event.sub_slices)
def testParsingWithArgs(self):
raw_event = {'type': 'Foo',
'startTime': 1,
'endTime': 3,
'foo': 7,
'bar': {'x': 1}}
event = (inspector_importer.InspectorTimelineImporter
.RawEventToTimelineEvent(raw_event))
self.assertEquals('Foo', event.name)
self.assertEquals(1, event.start)
self.assertEquals(3, event.end)
self.assertEquals(2, event.duration)
self.assertEquals([], event.sub_slices)
self.assertEquals(7, event.args['foo'])
self.assertEquals(1, event.args['bar']['x'])
def testEventsWithNoStartTimeAreDropped(self):
raw_event = {'type': 'Foo',
'endTime': 1,
'children': []}
event = (inspector_importer.InspectorTimelineImporter.
RawEventToTimelineEvent(raw_event))
self.assertEquals(None, event)
def testEventsWithNoEndTimeAreOk(self):
raw_event = {'type': 'Foo',
'startTime': 1,
'children': []}
event = (inspector_importer.InspectorTimelineImporter.
RawEventToTimelineEvent(raw_event))
self.assertEquals(1, event.start)
self.assertEquals(1, event.end)
def testOutOfOrderData(self):
builder = trace_data.TraceDataBuilder()
builder.AddEventsTo(
trace_data.INSPECTOR_TRACE_PART, [{
'startTime': 5295.004, 'endTime': 5305.004,
'data': {}, 'type': 'Program',
'children': [
{'startTime': 5295.004, 'data': {'id': 0}, 'type': 'BeginFrame', },
{'startTime': 4492.973, 'endTime': 4493.086, 'data': {'rootNode': -3},
'type': 'PaintSetup'},
{'startTime': 5298.004, 'endTime': 5301.004, 'type': 'Paint',
'frameId': '53228.1',
'data': {'rootNode': -3, 'clip': [0, 0, 1018, 0, 1018, 764, 0, 764],
'layerId': 10}, 'children': []},
{'startTime': 5301.004, 'endTime': 5305.004, 'data': {},
'type': 'CompositeLayers', 'children': []},
{'startTime': 5305.004, 'data': {}, 'type': 'MarkFirstPaint'}
]}])
model.TimelineModel(builder.AsData(), shift_world_to_zero=False)
class InspectorImporterTest(unittest.TestCase):
def testImport(self):
builder = trace_data.TraceDataBuilder()
builder.AddEventsTo(trace_data.INSPECTOR_TRACE_PART,
[_BACKGROUND_MESSAGE, _SAMPLE_MESSAGE])
m = model.TimelineModel(builder.AsData(), shift_world_to_zero=False)
self.assertEquals(1, len(m.processes))
process = m.processes.values()[0]
threads = process.threads
self.assertEquals(2, len(threads))
renderer_thread = threads[0]
self.assertEquals(1, len(renderer_thread.toplevel_slices))
self.assertEquals('Program',
renderer_thread.toplevel_slices[0].name)
second_thread = threads['2']
self.assertEquals(1, len(second_thread.toplevel_slices))
self.assertEquals('BeginFrame',
second_thread.toplevel_slices[0].name)
| bsd-3-clause | 596,523,539,545,777,000 | 35.234899 | 78 | 0.60752 | false |
civisanalytics/ansible | lib/ansible/modules/system/solaris_zone.py | 21 | 17028 | #!/usr/bin/python
# (c) 2015, Paul Markham <pmarkham@netrefinery.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: solaris_zone
short_description: Manage Solaris zones
description:
- Create, start, stop and delete Solaris zones. This module doesn't currently allow
changing of options for a zone that's already been created.
version_added: "2.0"
author: Paul Markham
requirements:
- Solaris 10 or 11
options:
state:
required: true
description:
- C(present), configure and install the zone.
- C(installed), synonym for C(present).
- C(running), if the zone already exists, boot it, otherwise, configure and install
the zone first, then boot it.
- C(started), synonym for C(running).
- C(stopped), shutdown a zone.
- C(absent), destroy the zone.
      - C(configured), configure the zone so that it's ready to be attached.
- C(attached), attach a zone, but do not boot it.
      - C(detached), shutdown and detach a zone.
choices: ['present', 'installed', 'started', 'running', 'stopped', 'absent', 'configured', 'attached', 'detached']
default: present
name:
description:
- Zone name.
required: true
path:
description:
- The path where the zone will be created. This is required when the zone is created, but not
used otherwise.
required: false
default: null
sparse:
description:
- Whether to create a sparse (C(true)) or whole root (C(false)) zone.
required: false
default: false
root_password:
description:
- The password hash for the root account. If not specified, the zone's root account
will not have a password.
required: false
default: null
config:
description:
- 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options
and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.
"set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end"'
required: false
default: empty string
create_options:
description:
- 'Extra options to the zonecfg(1M) create command.'
required: false
default: empty string
install_options:
description:
- 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,
use this to specify the profile XML file, e.g. install_options="-c sc_profile.xml"'
required: false
default: empty string
attach_options:
description:
- 'Extra options to the zoneadm attach command. For example, this can be used to specify
whether a minimum or full update of packages is required and if any packages need to
be deleted. For valid values, see zoneadm(1M)'
required: false
default: empty string
timeout:
description:
- Timeout, in seconds, for zone to boot.
required: false
default: 600
'''
EXAMPLES = '''
- name: Create and install a zone, but don't boot it
solaris_zone:
name: zone1
state: present
path: /zones/zone1
sparse: True
root_password: Be9oX7OSwWoU.
config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
- name: Create and install a zone and boot it
solaris_zone:
name: zone1
state: running
path: /zones/zone1
root_password: Be9oX7OSwWoU.
config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
- name: Boot an already installed zone
solaris_zone:
name: zone1
state: running
- name: Stop a zone
solaris_zone:
name: zone1
state: stopped
- name: Destroy a zone
solaris_zone:
name: zone1
state: absent
- name: Detach a zone
solaris_zone:
name: zone1
state: detached
- name: Configure a zone, ready to be attached
solaris_zone:
name: zone1
state: configured
path: /zones/zone1
root_password: Be9oX7OSwWoU.
config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'
- name: Attach zone1
solaris_zone:
name: zone1
state: attached
attach_options: -u
'''
import sys
import os
import platform
import tempfile
import time
class Zone(object):
def __init__(self, module):
self.changed = False
self.msg = []
self.module = module
self.path = self.module.params['path']
self.name = self.module.params['name']
self.sparse = self.module.params['sparse']
self.root_password = self.module.params['root_password']
self.timeout = self.module.params['timeout']
self.config = self.module.params['config']
self.create_options = self.module.params['create_options']
self.install_options = self.module.params['install_options']
self.attach_options = self.module.params['attach_options']
self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)
self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)
self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)
if self.module.check_mode:
self.msg.append('Running in check mode')
if platform.system() != 'SunOS':
self.module.fail_json(msg='This module requires Solaris')
(self.os_major, self.os_minor) = platform.release().split('.')
if int(self.os_minor) < 10:
self.module.fail_json(msg='This module requires Solaris 10 or later')
def configure(self):
if not self.path:
self.module.fail_json(msg='Missing required argument: path')
if not self.module.check_mode:
            t = tempfile.NamedTemporaryFile(delete=False)
if self.sparse:
t.write('create %s\n' % self.create_options)
self.msg.append('creating sparse-root zone')
else:
t.write('create -b %s\n' % self.create_options)
self.msg.append('creating whole-root zone')
t.write('set zonepath=%s\n' % self.path)
t.write('%s\n' % self.config)
t.close()
cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to create zone. %s' % (out + err))
os.unlink(t.name)
self.changed = True
self.msg.append('zone configured')
def install(self):
if not self.module.check_mode:
cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to install zone. %s' % (out + err))
if int(self.os_minor) == 10:
self.configure_sysid()
self.configure_password()
self.configure_ssh_keys()
self.changed = True
self.msg.append('zone installed')
def uninstall(self):
if self.is_installed():
if not self.module.check_mode:
cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))
self.changed = True
self.msg.append('zone uninstalled')
def configure_sysid(self):
if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):
os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)
open('%s/root/noautoshutdown' % self.path, 'w').close()
node = open('%s/root/etc/nodename' % self.path, 'w')
node.write(self.name)
node.close()
id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')
id.write('1 # System previously configured?\n')
id.write('1 # Bootparams succeeded?\n')
id.write('1 # System is on a network?\n')
id.write('1 # Extended network information gathered?\n')
id.write('0 # Autobinder succeeded?\n')
id.write('1 # Network has subnets?\n')
id.write('1 # root password prompted for?\n')
id.write('1 # locale and term prompted for?\n')
id.write('1 # security policy in place\n')
id.write('1 # NFSv4 domain configured\n')
id.write('0 # Auto Registration Configured\n')
id.write('vt100')
id.close()
def configure_ssh_keys(self):
rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path
dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path
if not os.path.isfile(rsa_key_file):
cmd = '%s -f %s -t rsa -N ""' % (self.ssh_keygen_cmd, rsa_key_file)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))
if not os.path.isfile(dsa_key_file):
cmd = '%s -f %s -t dsa -N ""' % (self.ssh_keygen_cmd, dsa_key_file)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to create dsa key. %s' % (out + err))
def configure_password(self):
shadow = '%s/root/etc/shadow' % self.path
if self.root_password:
f = open(shadow, 'r')
lines = f.readlines()
f.close()
for i in range(0, len(lines)):
fields = lines[i].split(':')
if fields[0] == 'root':
fields[1] = self.root_password
lines[i] = ':'.join(fields)
f = open(shadow, 'w')
for line in lines:
f.write(line)
f.close()
def boot(self):
if not self.module.check_mode:
cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))
"""
The boot command can return before the zone has fully booted. This is especially
true on the first boot when the zone initializes the SMF services. Unless the zone
has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.
Wait until the zone's console login is running; once that's running, consider the zone booted.
"""
elapsed = 0
while True:
if elapsed > self.timeout:
self.module.fail_json(msg='timed out waiting for zone to boot')
rc = os.system('ps -z %s -o args|grep "ttymon.*-d /dev/console" > /dev/null 2>/dev/null' % self.name)
if rc == 0:
break
time.sleep(10)
elapsed += 10
self.changed = True
self.msg.append('zone booted')
def destroy(self):
if self.is_running():
self.stop()
if self.is_installed():
self.uninstall()
if not self.module.check_mode:
cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))
self.changed = True
self.msg.append('zone deleted')
def stop(self):
if not self.module.check_mode:
cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))
self.changed = True
self.msg.append('zone stopped')
def detach(self):
if not self.module.check_mode:
cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))
self.changed = True
self.msg.append('zone detached')
def attach(self):
if not self.module.check_mode:
cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg='Failed to attach zone. %s' % (out + err))
self.changed = True
self.msg.append('zone attached')
def exists(self):
cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def is_running(self):
return self.status() == 'running'
def is_installed(self):
return self.status() == 'installed'
def is_configured(self):
return self.status() == 'configured'
def status(self):
cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)
(rc, out, err) = self.module.run_command(cmd)
if rc == 0:
return out.split(':')[2]
else:
return 'undefined'
def state_present(self):
if self.exists():
self.msg.append('zone already exists')
else:
self.configure()
self.install()
def state_running(self):
self.state_present()
if self.is_running():
self.msg.append('zone already running')
else:
self.boot()
def state_stopped(self):
if self.exists():
self.stop()
else:
self.module.fail_json(msg='zone does not exist')
def state_absent(self):
if self.exists():
if self.is_running():
self.stop()
self.destroy()
else:
self.msg.append('zone does not exist')
def state_configured(self):
if self.exists():
self.msg.append('zone already exists')
else:
self.configure()
def state_detached(self):
if not self.exists():
self.module.fail_json(msg='zone does not exist')
if self.is_configured():
self.msg.append('zone already detached')
else:
self.stop()
self.detach()
def state_attached(self):
if not self.exists():
            self.module.fail_json(msg='zone does not exist')
if self.is_configured():
self.attach()
else:
self.msg.append('zone already attached')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(default='present', choices=['running', 'started', 'present', 'installed', 'stopped', 'absent', 'configured', 'detached', 'attached']),
path = dict(default=None),
sparse = dict(default=False, type='bool'),
root_password = dict(default=None, no_log=True),
timeout = dict(default=600, type='int'),
config = dict(default=''),
create_options = dict(default=''),
install_options = dict(default=''),
attach_options = dict(default=''),
),
supports_check_mode=True
)
zone = Zone(module)
state = module.params['state']
if state == 'running' or state == 'started':
zone.state_running()
elif state == 'present' or state == 'installed':
zone.state_present()
elif state == 'stopped':
zone.state_stopped()
elif state == 'absent':
zone.state_absent()
elif state == 'configured':
zone.state_configured()
elif state == 'detached':
zone.state_detached()
elif state == 'attached':
zone.state_attached()
else:
module.fail_json(msg='Invalid state: %s' % state)
module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
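# Rough usage sketch (values are illustrative); note that root_password is
# expected to be an already-encrypted hash, since it is written straight into
# the zone's /etc/shadow:
#
#   - solaris_zone:
#       name: zone1
#       state: running
#       path: /zones/zone1
#       root_password: "{{ encrypted_root_pw }}"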
| gpl-3.0 | -4,217,340,596,590,327,300 | 33.965092 | 169 | 0.571882 | false |
jrandall/python-rt | test_rt.py | 4 | 10246 | """Tests for Rt - Python interface to Request Tracker :term:`API`"""
__license__ = """ Copyright (C) 2013 CZ.NIC, z.s.p.o.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__docformat__ = "reStructuredText en"
__authors__ = [
'"Jiri Machalek" <jiri.machalek@nic.cz>'
]
import unittest
import random
import string
from six import iteritems
from six.moves import range
import rt
class RtTestCase(unittest.TestCase):
RT_VALID_CREDENTIALS = {
'RT3.8 stable': {
'url': 'http://rt.easter-eggs.org/demos/3.8/REST/1.0',
'admin': {
'default_login': 'admin',
'default_password': 'admin',
},
'john.foo': {
'default_login': 'john.foo',
'default_password': 'john.foo',
}
},
'RT4.0 stable': {
'url': 'http://rt.easter-eggs.org/demos/4.0/REST/1.0',
'admin': {
'default_login': 'admin',
'default_password': 'admin',
},
'john.foo': {
'default_login': 'john.foo',
'default_password': 'john.foo',
}
},
}
RT_INVALID_CREDENTIALS = {
'RT3.8 stable (bad credentials)': {
'url': 'http://rt.easter-eggs.org/demos/3.8/REST/1.0',
'default_login': 'idontexist',
'default_password': 'idonthavepassword',
},
}
RT_MISSING_CREDENTIALS = {
'RT4.0 stable (missing credentials)': {
'url': 'http://rt.easter-eggs.org/demos/4.0/REST/1.0',
},
}
RT_BAD_URL = {
'RT (bad url)': {
'url': 'http://httpbin.org/status/404',
'default_login': 'idontexist',
'default_password': 'idonthavepassword',
},
}
def test_login_and_logout(self):
for name in self.RT_VALID_CREDENTIALS:
tracker = rt.Rt(self.RT_VALID_CREDENTIALS[name]['url'], **self.RT_VALID_CREDENTIALS[name]['john.foo'])
self.assertTrue(tracker.login(), 'Invalid login to RT demo site ' + name)
self.assertTrue(tracker.logout(), 'Invalid logout from RT demo site ' + name)
for name, params in iteritems(self.RT_INVALID_CREDENTIALS):
tracker = rt.Rt(**params)
self.assertFalse(tracker.login(), 'Login to RT demo site ' + name + ' should fail but did not')
self.assertRaises(rt.AuthorizationError, lambda: tracker.search())
for name, params in iteritems(self.RT_MISSING_CREDENTIALS):
tracker = rt.Rt(**params)
self.assertRaises(rt.AuthorizationError, lambda: tracker.login())
for name, params in iteritems(self.RT_BAD_URL):
tracker = rt.Rt(**params)
self.assertRaises(rt.UnexpectedResponse, lambda: tracker.login())
def check_or_create_queue(self, name):
tracker = rt.Rt(self.RT_VALID_CREDENTIALS[name]['url'], **self.RT_VALID_CREDENTIALS[name]['admin'])
tracker.login()
queue = tracker.get_queue('General')
if 'Name' not in queue:
queue_id = tracker.create_queue('General')
tracker.logout()
def test_ticket_operations(self):
ticket_subject = 'Testing issue ' + "".join([random.choice(string.ascii_letters) for i in range(15)])
ticket_text = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
for name in ('RT4.0 stable', 'RT3.8 stable'):
self.check_or_create_queue(name)
url = self.RT_VALID_CREDENTIALS[name]['url']
default_login = self.RT_VALID_CREDENTIALS[name]['john.foo']['default_login']
default_password = self.RT_VALID_CREDENTIALS[name]['john.foo']['default_password']
tracker = rt.Rt(url, default_login=default_login, default_password=default_password)
self.assertTrue(tracker.login(), 'Invalid login to RT demo site ' + name)
# empty search result
search_result = tracker.search(Subject=ticket_subject)
self.assertEqual(search_result, [], 'Search for ticket with random subject returned non empty list.')
# create
ticket_id = tracker.create_ticket(Subject=ticket_subject, Text=ticket_text)
self.assertTrue(ticket_id > -1, 'Creating ticket failed.')
# search
search_result = tracker.search(Subject=ticket_subject)
self.assertEqual(len(search_result), 1, 'Created ticket is not found by the subject.')
self.assertEqual(search_result[0]['id'], 'ticket/' + str(ticket_id), 'Bad id in search result of just created ticket.')
self.assertEqual(search_result[0]['Status'], 'new', 'Bad status in search result of just created ticket.')
# search all queues
search_result = tracker.search(Queue=rt.ALL_QUEUES, Subject=ticket_subject)
self.assertEqual(search_result[0]['id'], 'ticket/' + str(ticket_id), 'Bad id in search result of just created ticket.')
# raw search
search_result = tracker.search(raw_query='Subject="%s"' % ticket_subject)
self.assertEqual(len(search_result), 1, 'Created ticket is not found by the subject.')
self.assertEqual(search_result[0]['id'], 'ticket/' + str(ticket_id), 'Bad id in search result of just created ticket.')
self.assertEqual(search_result[0]['Status'], 'new', 'Bad status in search result of just created ticket.')
# raw search all queues
search_result = tracker.search(Queue=rt.ALL_QUEUES, raw_query='Subject="%s"' % ticket_subject)
self.assertEqual(search_result[0]['id'], 'ticket/' + str(ticket_id), 'Bad id in search result of just created ticket.')
# get ticket
ticket = tracker.get_ticket(ticket_id)
self.assertEqual(ticket, search_result[0], 'Ticket get directly by its id is not equal to previous search result.')
# edit ticket
requestors = ['tester1@example.com', 'tester2@example.com']
tracker.edit_ticket(ticket_id, Status='open', Requestors=requestors)
# get ticket (edited)
ticket = tracker.get_ticket(ticket_id)
self.assertEqual(ticket['Status'], 'open', 'Ticket status was not changed to open.')
self.assertEqual(ticket['Requestors'], requestors, 'Ticket requestors were not added properly.')
# get history
hist = tracker.get_history(ticket_id)
self.assertTrue(len(hist) > 0, 'Empty ticket history.')
            self.assertEqual(hist[0]['Content'], ticket_text, 'Ticket text was not received as it was submitted.')
# get_short_history
short_hist = tracker.get_short_history(ticket_id)
self.assertTrue(len(short_hist) > 0, 'Empty ticket short history.')
self.assertEqual(short_hist[0][1], 'Ticket created by john.foo')
# create 2nd ticket
ticket2_subject = 'Testing issue ' + "".join([random.choice(string.ascii_letters) for i in range(15)])
ticket2_id = tracker.create_ticket(Subject=ticket2_subject)
self.assertTrue(ticket2_id > -1, 'Creating 2nd ticket failed.')
# edit link
self.assertTrue(tracker.edit_link(ticket_id, 'DependsOn', ticket2_id))
# get links
links1 = tracker.get_links(ticket_id)
self.assertTrue('DependsOn' in links1, 'Missing just created link DependsOn.')
self.assertTrue(links1['DependsOn'][0].endswith('ticket/' + str(ticket2_id)), 'Unexpected value of link DependsOn.')
links2 = tracker.get_links(ticket2_id)
self.assertTrue('DependedOnBy' in links2, 'Missing just created link DependedOnBy.')
self.assertTrue(links2['DependedOnBy'][0].endswith('ticket/' + str(ticket_id)), 'Unexpected value of link DependedOnBy.')
# reply with attachment
attachment_content = b'Content of attachment.'
attachment_name = 'attachment-name.txt'
reply_text = 'Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.'
# should provide a content type as RT 4.0 type guessing is broken (missing use statement for guess_media_type in REST.pm)
self.assertTrue(tracker.reply(ticket_id, text=reply_text, files=[(attachment_name, attachment_content, 'text/plain')]), 'Reply to ticket returned False indicating error.')
at_ids = tracker.get_attachments_ids(ticket_id)
            self.assertTrue(at_ids, 'Empty list of attachment ids, something went wrong.')
at_content = tracker.get_attachment_content(ticket_id, at_ids[-1])
self.assertEqual(at_content, attachment_content, 'Recorded attachment is not equal to the original file.')
# attachments list
at_list = tracker.get_attachments(ticket_id)
at_names = [at[1] for at in at_list]
self.assertTrue(attachment_name in at_names, 'Attachment name is not in the list of attachments.')
# merge tickets
self.assertTrue(tracker.merge_ticket(ticket2_id, ticket_id), 'Merging tickets failed.')
# delete ticket
self.assertTrue(tracker.edit_ticket(ticket_id, Status='deleted'), 'Ticket delete failed.')
# get user
self.assertEqual(tracker.get_user(default_login)['EmailAddress'], default_login + '@no.mail', 'Bad user email received.')
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,281,327,186,178,988,000 | 52.364583 | 183 | 0.618388 | false |
liu602348184/django | tests/template_tests/syntax_tests/test_filter_syntax.py | 221 | 8844 | # coding: utf-8
from __future__ import unicode_literals
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import SomeClass, SomeOtherException, UTF8Class, setup
class FilterSyntaxTests(SimpleTestCase):
@setup({'filter-syntax01': '{{ var|upper }}'})
def test_filter_syntax01(self):
"""
Basic filter usage
"""
output = self.engine.render_to_string('filter-syntax01', {"var": "Django is the greatest!"})
self.assertEqual(output, "DJANGO IS THE GREATEST!")
@setup({'filter-syntax02': '{{ var|upper|lower }}'})
def test_filter_syntax02(self):
"""
Chained filters
"""
output = self.engine.render_to_string('filter-syntax02', {"var": "Django is the greatest!"})
self.assertEqual(output, "django is the greatest!")
@setup({'filter-syntax03': '{{ var |upper }}'})
def test_filter_syntax03(self):
"""
Allow spaces before the filter pipe
"""
output = self.engine.render_to_string('filter-syntax03', {'var': 'Django is the greatest!'})
self.assertEqual(output, 'DJANGO IS THE GREATEST!')
@setup({'filter-syntax04': '{{ var| upper }}'})
def test_filter_syntax04(self):
"""
Allow spaces after the filter pipe
"""
output = self.engine.render_to_string('filter-syntax04', {'var': 'Django is the greatest!'})
self.assertEqual(output, 'DJANGO IS THE GREATEST!')
@setup({'filter-syntax05': '{{ var|does_not_exist }}'})
def test_filter_syntax05(self):
"""
Raise TemplateSyntaxError for a nonexistent filter
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter-syntax05')
@setup({'filter-syntax06': '{{ var|fil(ter) }}'})
def test_filter_syntax06(self):
"""
Raise TemplateSyntaxError when trying to access a filter containing
an illegal character
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter-syntax06')
@setup({'filter-syntax07': "{% nothing_to_see_here %}"})
def test_filter_syntax07(self):
"""
Raise TemplateSyntaxError for invalid block tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter-syntax07')
@setup({'filter-syntax08': "{% %}"})
def test_filter_syntax08(self):
"""
Raise TemplateSyntaxError for empty block tags
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter-syntax08')
@setup({'filter-syntax09': '{{ var|cut:"o"|upper|lower }}'})
def test_filter_syntax09(self):
"""
Chained filters, with an argument to the first one
"""
output = self.engine.render_to_string('filter-syntax09', {'var': 'Foo'})
self.assertEqual(output, 'f')
@setup({'filter-syntax10': r'{{ var|default_if_none:" endquote\" hah" }}'})
def test_filter_syntax10(self):
"""
Literal string as argument is always "safe" from auto-escaping.
"""
output = self.engine.render_to_string('filter-syntax10', {"var": None})
self.assertEqual(output, ' endquote" hah')
@setup({'filter-syntax11': r'{{ var|default_if_none:var2 }}'})
def test_filter_syntax11(self):
"""
Variable as argument
"""
output = self.engine.render_to_string('filter-syntax11', {"var": None, "var2": "happy"})
self.assertEqual(output, 'happy')
@setup({'filter-syntax12': r'{{ var|yesno:"yup,nup,mup" }} {{ var|yesno }}'})
def test_filter_syntax12(self):
"""
Default argument testing
"""
output = self.engine.render_to_string('filter-syntax12', {"var": True})
self.assertEqual(output, 'yup yes')
@setup({'filter-syntax13': r'1{{ var.method3 }}2'})
def test_filter_syntax13(self):
"""
Fail silently for methods that raise an exception with a
`silent_variable_failure` attribute
"""
output = self.engine.render_to_string('filter-syntax13', {"var": SomeClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, "1INVALID2")
else:
self.assertEqual(output, "12")
@setup({'filter-syntax14': r'1{{ var.method4 }}2'})
def test_filter_syntax14(self):
"""
In methods that raise an exception without a
        `silent_variable_failure` attribute set to True, the exception propagates
"""
with self.assertRaises(SomeOtherException):
self.engine.render_to_string('filter-syntax14', {"var": SomeClass()})
@setup({'filter-syntax15': r'{{ var|default_if_none:"foo\bar" }}'})
def test_filter_syntax15(self):
"""
Escaped backslash in argument
"""
output = self.engine.render_to_string('filter-syntax15', {"var": None})
self.assertEqual(output, r'foo\bar')
@setup({'filter-syntax16': r'{{ var|default_if_none:"foo\now" }}'})
def test_filter_syntax16(self):
"""
Escaped backslash using known escape char
"""
output = self.engine.render_to_string('filter-syntax16', {"var": None})
self.assertEqual(output, r'foo\now')
@setup({'filter-syntax17': r'{{ var|join:"" }}'})
def test_filter_syntax17(self):
"""
Empty strings can be passed as arguments to filters
"""
output = self.engine.render_to_string('filter-syntax17', {'var': ['a', 'b', 'c']})
self.assertEqual(output, 'abc')
@setup({'filter-syntax18': r'{{ var }}'})
def test_filter_syntax18(self):
"""
Make sure that any unicode strings are converted to bytestrings
in the final output.
"""
output = self.engine.render_to_string('filter-syntax18', {'var': UTF8Class()})
self.assertEqual(output, '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111')
@setup({'filter-syntax19': '{{ var|truncatewords:1 }}'})
def test_filter_syntax19(self):
"""
Numbers as filter arguments should work
"""
output = self.engine.render_to_string('filter-syntax19', {"var": "hello world"})
self.assertEqual(output, "hello ...")
@setup({'filter-syntax20': '{{ ""|default_if_none:"was none" }}'})
def test_filter_syntax20(self):
"""
Filters should accept empty string constants
"""
output = self.engine.render_to_string('filter-syntax20')
self.assertEqual(output, "")
@setup({'filter-syntax21': r'1{{ var.silent_fail_key }}2'})
def test_filter_syntax21(self):
"""
Fail silently for non-callable attribute and dict lookups which
raise an exception with a "silent_variable_failure" attribute
"""
output = self.engine.render_to_string('filter-syntax21', {"var": SomeClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, "1INVALID2")
else:
self.assertEqual(output, "12")
@setup({'filter-syntax22': r'1{{ var.silent_fail_attribute }}2'})
def test_filter_syntax22(self):
"""
Fail silently for non-callable attribute and dict lookups which
raise an exception with a `silent_variable_failure` attribute
"""
output = self.engine.render_to_string('filter-syntax22', {"var": SomeClass()})
if self.engine.string_if_invalid:
self.assertEqual(output, "1INVALID2")
else:
self.assertEqual(output, "12")
@setup({'filter-syntax23': r'1{{ var.noisy_fail_key }}2'})
def test_filter_syntax23(self):
"""
In attribute and dict lookups that raise an unexpected exception
        without a `silent_variable_failure` attribute set to True, the exception
propagates
"""
with self.assertRaises(SomeOtherException):
self.engine.render_to_string('filter-syntax23', {"var": SomeClass()})
@setup({'filter-syntax24': r'1{{ var.noisy_fail_attribute }}2'})
def test_filter_syntax24(self):
"""
In attribute and dict lookups that raise an unexpected exception
        without a `silent_variable_failure` attribute set to True, the exception
propagates
"""
with self.assertRaises(SomeOtherException):
self.engine.render_to_string('filter-syntax24', {"var": SomeClass()})
@setup({'filter-syntax25': '{{ var.attribute_error_attribute }}'})
def test_filter_syntax25(self):
"""
#16383 - Attribute errors from an @property value should be
reraised.
"""
with self.assertRaises(AttributeError):
self.engine.render_to_string('filter-syntax25', {'var': SomeClass()})
| bsd-3-clause | -3,255,777,177,288,275,500 | 37.452174 | 100 | 0.605834 | false |
torre76/py-crypto-params | docs/conf.py | 1 | 10172 | # -*- coding: utf-8 -*-
#
# py-crypto-params documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 29 20:30:34 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys
import os
import shlex
import sphinx_rtd_theme
def copyright_builder():
"""
    Simple function that builds copyright information with the correct year range
:return: Copyright info that will be printed by Sphinx
:rtype: str
"""
beginning_year = 2015
current_year = datetime.date.today().year
if beginning_year == current_year:
return u"{year}, Gian Luca Dalla Torre".format(year=beginning_year)
else:
return u"{start} - {end}, Gian Luca Dalla Torre".format(start=beginning_year, end=current_year)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath('.'), os.pardir)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py-crypto-params'
copyright = copyright_builder()
author = u'Gian Luca Dalla Torre'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'py-crypto-paramsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'py-crypto-params.tex', u'py-crypto-params Documentation',
u'Gian Luca Dalla Torre', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'py-crypto-params', u'py-crypto-params Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'py-crypto-params', u'py-crypto-params Documentation',
author, 'py-crypto-params', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| lgpl-3.0 | -5,823,799,457,224,496,000 | 31.602564 | 103 | 0.706646 | false |
jorge-marques/shoop | shoop/utils/setup.py | 8 | 1445 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
class Setup(object):
def __init__(self, load_from=None):
self.commit(load_from)
def is_valid_key(self, key):
return key == key.upper() and not key.startswith("_")
def commit(self, source):
if source:
if not hasattr(source, "items"): # pragma: no cover
source = vars(source)
for key, value in source.items():
if self.is_valid_key(key):
setattr(self, key, value)
def values(self):
for key, value in self.__dict__.items():
if self.is_valid_key(key): # pragma: no branch
yield (key, value)
def get(self, key, default=None): # pragma: no cover
return getattr(self, key, default)
def getlist(self, key, default=()): # pragma: no cover
val = getattr(self, key, default)
return list(val)
@classmethod
def configure(cls, configure):
setup = cls()
try:
configure(setup)
except: # pragma: no cover
print("@" * 80)
import traceback
import sys
traceback.print_exc()
print("@" * 80)
sys.exit(1)
return setup.values()
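# A minimal usage sketch (the configure callback is illustrative):
#
#   def configure(setup):
#       setup.DEBUG = True          # upper-case keys are kept
#       setup._ignored = 'dropped'  # keys starting with "_" are filtered out
#   assert dict(Setup.configure(configure)) == {'DEBUG': True}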
| agpl-3.0 | 2,670,507,481,527,353,000 | 28.489796 | 68 | 0.553633 | false |
PlanTool/plantool | code/Deterministic/LAMA/seq-sat-lama/lama/translate/pddl/f_expression.py | 10 | 5321 | #######################################################################
#
# Author: Gabi Roeger
# Modified by: Silvia Richter (silvia.richter@nicta.com.au)
# (C) Copyright 2008: Gabi Roeger and NICTA
#
# This file is part of LAMA.
#
# LAMA is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the license, or (at your option) any later version.
#
# LAMA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#######################################################################
import string
import conditions
def parse_expression(exp):
if isinstance(exp, list):
functionsymbol = exp[0]
return PrimitiveNumericExpression(functionsymbol,
[conditions.parse_term(arg) for arg in exp[1:]])
elif exp.replace(".","").isdigit():
return NumericConstant(string.atof(exp))
else:
return PrimitiveNumericExpression(exp,[])
def parse_assignment(alist):
assert len(alist) == 3
op = alist[0]
head = parse_expression(alist[1])
exp = parse_expression(alist[2])
if op == "=":
return Assign(head, exp)
elif op == "increase":
return Increase(head, exp)
else:
assert False, "Assignment operator not supported."
class FunctionalExpression(object):
def __init__(self, parts):
self.parts = tuple(parts)
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
for part in self.parts:
part.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
raise ValueError("Cannot instantiate condition: not normalized")
class NumericConstant(FunctionalExpression):
parts = ()
def __init__(self, value):
self.value = value
def __eq__(self, other):
return (self.__class__ == other.__class__ and self.value == other.value)
def __str__(self):
return "%s %s" % (self.__class__.__name__, self.value)
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
return self
class PrimitiveNumericExpression(FunctionalExpression):
parts = ()
def __init__(self, symbol, args):
self.symbol = symbol
self.args = tuple(args)
def __eq__(self, other):
if not (self.__class__ == other.__class__ and self.symbol == other.symbol
and len(self.args) == len(other.args)):
return False
else:
for s,o in zip(self.args, other.args):
if not s == o:
return False
return True
def __str__(self):
return "%s %s(%s)" % ("PNE", self.symbol, ", ".join(map(str, self.args)))
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
for arg in self.args:
arg.dump(indent + " ")
def _dump(self):
return str(self)
def instantiate(self, var_mapping, init_facts):
args = [conditions.ObjectTerm(var_mapping.get(arg.name, arg.name)) for arg in self.args]
pne = PrimitiveNumericExpression(self.symbol, args)
assert not self.symbol == "total-cost"
# We know this expression is constant. Substitute it by corresponding
# initialization from task.
for fact in init_facts:
if isinstance(fact, FunctionAssignment):
if fact.fluent == pne:
return fact.expression
assert False, "Could not find instantiation for PNE!"
class FunctionAssignment(object):
def __init__(self, fluent, expression):
self.fluent = fluent
self.expression = expression
def __str__(self):
return "%s %s %s" % (self.__class__.__name__, self.fluent, self.expression)
def dump(self, indent=" "):
print "%s%s" % (indent, self._dump())
self.fluent.dump(indent + " ")
self.expression.dump(indent + " ")
def _dump(self):
return self.__class__.__name__
def instantiate(self, var_mapping, init_facts):
if not (isinstance(self.expression, PrimitiveNumericExpression) or
isinstance(self.expression, NumericConstant)):
raise ValueError("Cannot instantiate assignment: not normalized")
# We know that this assignment is a cost effect of an action (for initial state
# assignments, "instantiate" is not called). Hence, we know that the fluent is
# the 0-ary "total-cost" which does not need to be instantiated
assert self.fluent.symbol == "total-cost"
fluent = self.fluent
expression = self.expression.instantiate(var_mapping, init_facts)
return self.__class__(fluent, expression)
class Assign(FunctionAssignment):
def __str__(self):
return "%s := %s" % (self.fluent, self.expression)
class Increase(FunctionAssignment):
pass
| gpl-2.0 | 4,340,161,234,069,375,500 | 37.557971 | 96 | 0.601203 | false |
daj0ker/BinPy | BinPy/examples/source/ic/Series_4000/IC4069.py | 5 | 1254 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Usage of IC 4069
# <codecell>
from __future__ import print_function
from BinPy import *
# <codecell>
# Usage of IC 4069:
ic = IC_4069()
print(ic.__doc__)
# <codecell>
# The Pin configuration is:
inp = {2: 0, 3: 1, 4: 0, 5: 1, 7: 0, 9: 1, 10: 1, 11: 1, 12: 1, 14: 1}
# Pin initialization
# Powering up the IC - using -- ic.setIC({14: 1, 7: 0}) --
ic.setIC({14: 1, 7: 0})
# Setting the inputs of the ic
ic.setIC(inp)
# Draw the IC with the current configuration
ic.drawIC()
# <codecell>
# Run the IC with the current configuration using -- print ic.run() --
# Note that ic.run() returns a dict of pin configuration, similar in form to the input dict above
print (ic.run())
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# Run the IC
print (ic.run())
# <codecell>
# Connector Outputs
c = Connector()
# Set the output connector to a particular pin of the ic
ic.setOutput(2, c)
print(c)
| bsd-3-clause | -517,445,174,079,120,600 | 14.108434 | 71 | 0.64673 | false |
zelbanna/sdcp | core/genlib.py | 1 | 3476 | """Module docstring.
Generic Library. Many of these helpers are kept for reference and can be inlined at their call sites.
"""
__author__ = "Zacharias El Banna"
################################# Generics ####################################
def debug_decorator(func_name):
def decorator(func):
def decorated(*args,**kwargs):
res = func(*args,**kwargs)
print("DEBUGGER: %s(%s,%s) => %s"%(func_name, args, kwargs, res))
return res
return decorated
return decorator
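# Minimal usage sketch (the decorated function is illustrative):
#
#   @debug_decorator('add')
#   def add(a, b):
#       return a + b
#
#   add(1, 2)   # prints "DEBUGGER: add((1, 2),{}) => 3" and returns 3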
#
# Basic Auth header generator for base64 authentication
#
def basic_auth(aUsername,aPassword):
from base64 import b64encode
return {'Authorization':'Basic %s'%(b64encode(("%s:%s"%(aUsername,aPassword)).encode('utf-8')).decode()) }
def random_string(aLength):
import string
import random
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(aLength))
def get_host_name(aIP):
from socket import gethostbyaddr
try: return gethostbyaddr(aIP)[0].partition('.')[0]
except: return None
def ip2int(addr):
from struct import unpack
from socket import inet_aton
return unpack("!I", inet_aton(addr))[0]
def int2ip(addr):
from struct import pack
from socket import inet_ntoa
return inet_ntoa(pack("!I", addr))
def ips2range(addr1,addr2):
from struct import pack, unpack
from socket import inet_ntoa, inet_aton
return [inet_ntoa(pack("!I", addr)) for addr in range(unpack("!I", inet_aton(addr1))[0], unpack("!I", inet_aton(addr2))[0] + 1)]
def ipint2range(start,end):
from struct import pack
from socket import inet_ntoa
return [inet_ntoa(pack("!I", addr)) for addr in range(start,end + 1)]
def ip2ptr(addr):
octets = addr.split('.')
octets.reverse()
octets.append("in-addr.arpa")
return ".".join(octets)
def ip2arpa(addr):
octets = addr.split('.')[:3]
octets.reverse()
octets.append("in-addr.arpa")
return ".".join(octets)
def int2mac(aInt):
return ':'.join("%s%s"%x for x in zip(*[iter("{:012x}".format(aInt))]*2))
def mac2int(aMAC):
try: return int(aMAC.replace(":",""),16)
except: return 0
def ping_os(ip):
from os import system
return system("ping -c 1 -w 1 " + ip + " > /dev/null 2>&1") == 0
def external_ip():
from dns import resolver
from socket import gethostbyname
try:
opendns = resolver.Resolver()
opendns.nameservers = [gethostbyname('resolver1.opendns.com')]
res = str(opendns.query("myip.opendns.com",'A').response.answer[0])
return res.split()[4]
except:
return None
def get_quote(aString):
from urllib.parse import quote_plus
return quote_plus(aString)
def str2hex(arg):
try:
return '0x{0:02x}'.format(int(arg))
except:
return '0x00'
def pidfile_write(pidfname):
from os import getpid
pidfile = open(pidfname,'w')
pidfile.write(str(getpid()))
pidfile.close()
def pidfile_read(pidfname):
pid = -1
from os import path as ospath
if ospath.isfile(pidfname):
pidfile = open(pidfname)
pid = pidfile.readline().strip('\n')
pidfile.close()
return int(pid)
def pidfile_release(pidfname):
from os import path as ospath
if ospath.isfile(pidfname):
from os import remove
remove(pidfname)
def pidfile_lock(pidfname, sleeptime = 1):
from time import sleep
from os import path as ospath
while ospath.isfile(pidfname):
sleep(sleeptime)
pidfile_write(pidfname)
def file_replace(afile,old,new):
if afile == "" or new == "" or old == "":
return False
with open(afile, 'r') as f:
filedata = f.read()
filedata = filedata.replace(old,new)
with open(afile, 'w') as f:
f.write(filedata)
return True
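# Typical pidfile lifecycle for a long-running process (path illustrative):
#
#   pidfile_lock('/var/run/sdcp.pid')   # waits until no other instance holds it
#   try:
#       ...                             # do the actual work
#   finally:
#       pidfile_release('/var/run/sdcp.pid')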
| gpl-3.0 | 9,216,863,462,132,861,000 | 24.558824 | 129 | 0.681243 | false |
mlperf/training_results_v0.7 | Google/benchmarks/transformer/implementations/transformer-research-TF-tpu-v4-128/lingvo/base_trial.py | 3 | 4213 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines trials for parameter exploration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import hyperparams
class Trial(object):
"""Base class for a trial."""
@classmethod
def Params(cls):
"""Default parameters for a trial."""
p = hyperparams.Params()
p.Define(
'report_interval_seconds', 600,
'Interval between reporting trial results and checking for early '
'stopping.')
p.Define('vizier_objective_metric_key', 'loss',
'Which eval metric to use as the "objective value" for tuning.')
p.Define(
'report_during_training', False,
'Whether to report objective metrics during the training process.')
return p
def __init__(self, params):
self._params = params.Copy()
self._next_report_time = time.time()
@property
def report_interval_seconds(self):
return self._params.report_interval_seconds
@property
def objective_metric_key(self):
return self._params.vizier_objective_metric_key
def Name(self):
raise NotImplementedError('Abstract method')
def OverrideModelParams(self, model_params):
"""Modifies `model_params` according to trial params.
Through this method a `Trial` may tweak model hyperparams (e.g., learning
rate, shape, depth, or width of networks).
Args:
model_params: the original model hyperparams.
Returns:
The modified `model_params`.
"""
raise NotImplementedError('Abstract method')
def ShouldStop(self):
"""Returns whether the trial should stop."""
raise NotImplementedError('Abstract method')
def ReportDone(self, infeasible=False, infeasible_reason=''):
"""Report that the trial is completed."""
raise NotImplementedError('Abstract method')
def ShouldStopAndMaybeReport(self, global_step, metrics_dict):
"""Returns whether the trial should stop.
Args:
global_step: The global step counter.
      metrics_dict: If not None, contains the metrics that should be
        reported. If None, do nothing but return whether the
        trial should stop.
"""
if not metrics_dict or not self._params.report_during_training:
return self.ShouldStop()
if time.time() < self._next_report_time:
return False
self._next_report_time = time.time() + self.report_interval_seconds
return self._DoReportTrainingProgress(global_step, metrics_dict)
def _DoReportTrainingProgress(self, global_step, metrics_dict):
raise NotImplementedError('Abstract method')
def ReportEvalMeasure(self, global_step, metrics_dict, checkpoint_path):
"""Reports eval measurement and returns whether the trial should stop."""
raise NotImplementedError('Abstract method')
class NoOpTrial(Trial):
"""A Trial implementation that does nothing."""
def __init__(self):
super(NoOpTrial, self).__init__(Trial.Params())
def Name(self):
return ''
def OverrideModelParams(self, model_params):
return model_params
def ShouldStop(self):
return False
def ReportDone(self, infeasible=False, infeasible_reason=''):
return False
def ShouldStopAndMaybeReport(self, global_step, metrics_dict):
del global_step, metrics_dict # Unused
return False
def ReportEvalMeasure(self, global_step, metrics_dict, checkpoint_path):
del global_step, metrics_dict, checkpoint_path # Unused
return False
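# A real tuner integration would subclass Trial; a rough sketch (attribute
# names are illustrative, not part of this module):
#
#   class MyTunerTrial(Trial):
#
#     def OverrideModelParams(self, model_params):
#       model_params.train.learning_rate = self._suggested_lr
#       return model_params
#
#     def ShouldStop(self):
#       return self._tuner_requested_stop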
| apache-2.0 | -9,022,248,341,449,987,000 | 31.658915 | 101 | 0.696653 | false |
Anonymous-X6/django | django/contrib/gis/geos/geometry.py | 216 | 23649 | """
This module contains the 'base' GEOSGeometry object -- all GEOS Geometries
inherit from this object.
"""
from __future__ import unicode_literals
import json
from ctypes import addressof, byref, c_double
from django.contrib.gis import gdal
from django.contrib.gis.geometry.regex import hex_regex, json_regex, wkt_regex
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.mutable_list import ListMixin
from django.contrib.gis.geos.prepared import PreparedGeometry
from django.contrib.gis.geos.prototypes.io import (
ewkb_w, wkb_r, wkb_w, wkt_r, wkt_w,
)
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class GEOSGeometry(GEOSBase, ListMixin):
"A class that, generally, encapsulates a GEOS geometry."
_GEOS_CLASSES = None
ptr_type = GEOM_PTR
has_cs = False # Only Point, LineString, LinearRing have coordinate sequences
def __init__(self, geo_input, srid=None):
"""
The base constructor for GEOS geometry objects, and may take the
following inputs:
* strings:
- WKT
- HEXEWKB (a PostGIS-specific canonical form)
- GeoJSON (requires GDAL)
* buffer:
- WKB
The `srid` keyword is used to specify the Source Reference Identifier
(SRID) number for this Geometry. If not set, the SRID will be None.
"""
if isinstance(geo_input, bytes):
geo_input = force_text(geo_input)
if isinstance(geo_input, six.string_types):
wkt_m = wkt_regex.match(geo_input)
if wkt_m:
# Handling WKT input.
if wkt_m.group('srid'):
srid = int(wkt_m.group('srid'))
g = wkt_r().read(force_bytes(wkt_m.group('wkt')))
elif hex_regex.match(geo_input):
# Handling HEXEWKB input.
g = wkb_r().read(force_bytes(geo_input))
elif json_regex.match(geo_input):
# Handling GeoJSON input.
if not gdal.HAS_GDAL:
raise ValueError('Initializing geometry from JSON input requires GDAL.')
g = wkb_r().read(gdal.OGRGeometry(geo_input).wkb)
else:
                raise ValueError('String or unicode input unrecognized as WKT, EWKT, or HEXEWKB.')
elif isinstance(geo_input, GEOM_PTR):
# When the input is a pointer to a geometry (GEOM_PTR).
g = geo_input
elif isinstance(geo_input, six.memoryview):
# When the input is a buffer (WKB).
g = wkb_r().read(geo_input)
elif isinstance(geo_input, GEOSGeometry):
g = capi.geom_clone(geo_input.ptr)
else:
# Invalid geometry type.
raise TypeError('Improper geometry input type: %s' % str(type(geo_input)))
if g:
# Setting the pointer object with a valid pointer.
self.ptr = g
else:
raise GEOSException('Could not initialize GEOS Geometry with given input.')
# Post-initialization setup.
self._post_init(srid)
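    # A few construction sketches (coordinates are illustrative):
    #
    #   GEOSGeometry('POINT(5 23)')                                 # WKT
    #   GEOSGeometry('SRID=4326;POINT(5 23)')                       # EWKT
    #   GEOSGeometry('{"type": "Point", "coordinates": [5, 23]}')   # GeoJSON
    #   GEOSGeometry(six.memoryview(wkb_bytes))                     # WKB buffer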
def _post_init(self, srid):
"Helper routine for performing post-initialization setup."
# Setting the SRID, if given.
if srid and isinstance(srid, int):
self.srid = srid
# Setting the class type (e.g., Point, Polygon, etc.)
if GEOSGeometry._GEOS_CLASSES is None:
# Lazy-loaded variable to avoid import conflicts with GEOSGeometry.
from .linestring import LineString, LinearRing
from .point import Point
from .polygon import Polygon
from .collections import (
GeometryCollection, MultiPoint, MultiLineString, MultiPolygon)
GEOSGeometry._GEOS_CLASSES = {
0: Point,
1: LineString,
2: LinearRing,
3: Polygon,
4: MultiPoint,
5: MultiLineString,
6: MultiPolygon,
7: GeometryCollection,
}
self.__class__ = GEOSGeometry._GEOS_CLASSES[self.geom_typeid]
# Setting the coordinate sequence for the geometry (will be None on
# geometries that do not have coordinate sequences)
self._set_cs()
def __del__(self):
"""
Destroys this Geometry; in other words, frees the memory used by the
GEOS C++ object.
"""
if self._ptr and capi:
capi.destroy_geom(self._ptr)
def __copy__(self):
"""
Returns a clone because the copy of a GEOSGeometry may contain an
invalid pointer location if the original is garbage collected.
"""
return self.clone()
def __deepcopy__(self, memodict):
"""
The `deepcopy` routine is used by the `Node` class of django.utils.tree;
thus, the protocol routine needs to be implemented to return correct
copies (clones) of these GEOS objects, which use C pointers.
"""
return self.clone()
def __str__(self):
"EWKT is used for the string representation."
return self.ewkt
def __repr__(self):
"Short-hand representation because WKT may be very large."
return '<%s object at %s>' % (self.geom_type, hex(addressof(self.ptr)))
# Pickling support
def __getstate__(self):
# The pickled state is simply a tuple of the WKB (in string form)
# and the SRID.
return bytes(self.wkb), self.srid
def __setstate__(self, state):
# Instantiating from the tuple state that was pickled.
wkb, srid = state
ptr = wkb_r().read(six.memoryview(wkb))
if not ptr:
raise GEOSException('Invalid Geometry loaded from pickled state.')
self.ptr = ptr
self._post_init(srid)
# Comparison operators
def __eq__(self, other):
"""
Equivalence testing, a Geometry may be compared with another Geometry
or a WKT representation.
"""
if isinstance(other, six.string_types):
return self.wkt == other
elif isinstance(other, GEOSGeometry):
return self.equals_exact(other)
else:
return False
def __ne__(self, other):
"The not equals operator."
return not (self == other)
# ### Geometry set-like operations ###
# Thanks to Sean Gillies for inspiration:
# http://lists.gispython.org/pipermail/community/2007-July/001034.html
# g = g1 | g2
def __or__(self, other):
"Returns the union of this Geometry and the other."
return self.union(other)
# g = g1 & g2
def __and__(self, other):
"Returns the intersection of this Geometry and the other."
return self.intersection(other)
# g = g1 - g2
def __sub__(self, other):
"Return the difference this Geometry and the other."
return self.difference(other)
# g = g1 ^ g2
def __xor__(self, other):
"Return the symmetric difference of this Geometry and the other."
return self.sym_difference(other)
# #### Coordinate Sequence Routines ####
def _set_cs(self):
"Sets the coordinate sequence for this Geometry."
if self.has_cs:
self._cs = GEOSCoordSeq(capi.get_cs(self.ptr), self.hasz)
else:
self._cs = None
@property
def coord_seq(self):
"Returns a clone of the coordinate sequence for this Geometry."
if self.has_cs:
return self._cs.clone()
# #### Geometry Info ####
@property
def geom_type(self):
"Returns a string representing the Geometry type, e.g. 'Polygon'"
return capi.geos_type(self.ptr).decode()
@property
def geom_typeid(self):
"Returns an integer representing the Geometry type."
return capi.geos_typeid(self.ptr)
@property
def num_geom(self):
"Returns the number of geometries in the Geometry."
return capi.get_num_geoms(self.ptr)
@property
def num_coords(self):
"Returns the number of coordinates in the Geometry."
return capi.get_num_coords(self.ptr)
@property
def num_points(self):
"Returns the number points, or coordinates, in the Geometry."
return self.num_coords
@property
def dims(self):
"Returns the dimension of this Geometry (0=point, 1=line, 2=surface)."
return capi.get_dims(self.ptr)
def normalize(self):
"Converts this Geometry to normal form (or canonical form)."
return capi.geos_normalize(self.ptr)
# #### Unary predicates ####
@property
def empty(self):
"""
Returns a boolean indicating whether the set of points in this Geometry
        is empty.
"""
return capi.geos_isempty(self.ptr)
@property
def hasz(self):
"Returns whether the geometry has a 3D dimension."
return capi.geos_hasz(self.ptr)
@property
def ring(self):
"Returns whether or not the geometry is a ring."
return capi.geos_isring(self.ptr)
@property
def simple(self):
"Returns false if the Geometry not simple."
return capi.geos_issimple(self.ptr)
@property
def valid(self):
"This property tests the validity of this Geometry."
return capi.geos_isvalid(self.ptr)
@property
def valid_reason(self):
"""
Returns a string containing the reason for any invalidity.
"""
return capi.geos_isvalidreason(self.ptr).decode()
# #### Binary predicates. ####
def contains(self, other):
"Returns true if other.within(this) returns true."
return capi.geos_contains(self.ptr, other.ptr)
def crosses(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
        is T*T****** (for a point and a curve, a point and an area, or a line
        and an area) 0******** (for two curves).
"""
return capi.geos_crosses(self.ptr, other.ptr)
def disjoint(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FF*FF****.
"""
return capi.geos_disjoint(self.ptr, other.ptr)
def equals(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**FFF*.
"""
return capi.geos_equals(self.ptr, other.ptr)
def equals_exact(self, other, tolerance=0):
"""
Returns true if the two Geometries are exactly equal, up to a
specified tolerance.
"""
return capi.geos_equalsexact(self.ptr, other.ptr, float(tolerance))
def intersects(self, other):
"Returns true if disjoint returns false."
return capi.geos_intersects(self.ptr, other.ptr)
def overlaps(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).
"""
return capi.geos_overlaps(self.ptr, other.ptr)
def relate_pattern(self, other, pattern):
"""
Returns true if the elements in the DE-9IM intersection matrix for the
two Geometries match the elements in pattern.
"""
if not isinstance(pattern, six.string_types) or len(pattern) > 9:
raise GEOSException('invalid intersection matrix pattern')
return capi.geos_relatepattern(self.ptr, other.ptr, force_bytes(pattern))
def touches(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is FT*******, F**T***** or F***T****.
"""
return capi.geos_touches(self.ptr, other.ptr)
def within(self, other):
"""
Returns true if the DE-9IM intersection matrix for the two Geometries
is T*F**F***.
"""
return capi.geos_within(self.ptr, other.ptr)
# #### SRID Routines ####
def get_srid(self):
"Gets the SRID for the geometry, returns None if no SRID is set."
s = capi.geos_get_srid(self.ptr)
if s == 0:
return None
else:
return s
def set_srid(self, srid):
"Sets the SRID for the geometry."
capi.geos_set_srid(self.ptr, srid)
srid = property(get_srid, set_srid)
# #### Output Routines ####
@property
def ewkt(self):
"""
Returns the EWKT (SRID + WKT) of the Geometry. Note that Z values
are only included in this representation if GEOS >= 3.3.0.
"""
if self.get_srid():
return 'SRID=%s;%s' % (self.srid, self.wkt)
else:
return self.wkt
@property
def wkt(self):
"Returns the WKT (Well-Known Text) representation of this Geometry."
return wkt_w(3 if self.hasz else 2).write(self).decode()
@property
def hex(self):
"""
Returns the WKB of this Geometry in hexadecimal form. Please note
that the SRID is not included in this representation because it is not
a part of the OGC specification (use the `hexewkb` property instead).
"""
# A possible faster, all-python, implementation:
# str(self.wkb).encode('hex')
return wkb_w(3 if self.hasz else 2).write_hex(self)
@property
def hexewkb(self):
"""
Returns the EWKB of this Geometry in hexadecimal form. This is an
extension of the WKB specification that includes SRID value that are
a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write_hex(self)
@property
def json(self):
"""
Returns GeoJSON representation of this Geometry.
"""
return json.dumps({'type': self.__class__.__name__, 'coordinates': self.coords})
geojson = json
@property
def wkb(self):
"""
Returns the WKB (Well-Known Binary) representation of this Geometry
as a Python buffer. SRID and Z values are not included, use the
`ewkb` property instead.
"""
return wkb_w(3 if self.hasz else 2).write(self)
@property
def ewkb(self):
"""
Return the EWKB representation of this Geometry as a Python buffer.
This is an extension of the WKB specification that includes any SRID
value that are a part of this geometry.
"""
return ewkb_w(3 if self.hasz else 2).write(self)
@property
def kml(self):
"Returns the KML representation of this Geometry."
gtype = self.geom_type
return '<%s>%s</%s>' % (gtype, self.coord_seq.kml, gtype)
@property
def prepared(self):
"""
Returns a PreparedGeometry corresponding to this geometry -- it is
optimized for the contains, intersects, and covers operations.
"""
return PreparedGeometry(self)
# #### GDAL-specific output routines ####
@property
def ogr(self):
"Returns the OGR Geometry for this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to convert to an OGRGeometry.')
if self.srid:
try:
return gdal.OGRGeometry(self.wkb, self.srid)
except gdal.SRSException:
pass
return gdal.OGRGeometry(self.wkb)
@property
def srs(self):
"Returns the OSR SpatialReference for SRID of this Geometry."
if not gdal.HAS_GDAL:
raise GEOSException('GDAL required to return a SpatialReference object.')
if self.srid:
try:
return gdal.SpatialReference(self.srid)
except gdal.SRSException:
pass
return None
@property
def crs(self):
"Alias for `srs` property."
return self.srs
def transform(self, ct, clone=False):
"""
Requires GDAL. Transforms the geometry according to the given
transformation object, which may be an integer SRID, and WKT or
PROJ.4 string. By default, the geometry is transformed in-place and
nothing is returned. However if the `clone` keyword is set, then this
geometry will not be modified and a transformed clone will be returned
instead.
"""
srid = self.srid
if ct == srid:
# short-circuit where source & dest SRIDs match
if clone:
return self.clone()
else:
return
if (srid is None) or (srid < 0):
raise GEOSException("Calling transform() with no SRID set is not supported")
if not gdal.HAS_GDAL:
raise GEOSException("GDAL library is not available to transform() geometry.")
# Creating an OGR Geometry, which is then transformed.
g = self.ogr
g.transform(ct)
# Getting a new GEOS pointer
ptr = wkb_r().read(g.wkb)
if clone:
# User wants a cloned transformed geometry returned.
return GEOSGeometry(ptr, srid=g.srid)
if ptr:
# Reassigning pointer, and performing post-initialization setup
# again due to the reassignment.
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(g.srid)
else:
raise GEOSException('Transformed WKB was invalid.')
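    # A brief usage sketch for transform() (standard EPSG SRIDs, but the
    # coordinates are hypothetical):
    #
    #   >>> pnt = GEOSGeometry('POINT(-104.609 38.255)', srid=4326)
    #   >>> clone = pnt.transform(2774, clone=True)  # pnt is untouched
    #   >>> pnt.transform(2774)                      # reprojects in place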
# #### Topology Routines ####
def _topology(self, gptr):
"Helper routine to return Geometry from the given pointer."
return GEOSGeometry(gptr, srid=self.srid)
@property
def boundary(self):
"Returns the boundary as a newly allocated Geometry object."
return self._topology(capi.geos_boundary(self.ptr))
def buffer(self, width, quadsegs=8):
"""
Returns a geometry that represents all points whose distance from this
Geometry is less than or equal to distance. Calculations are in the
        Spatial Reference System of this Geometry. The optional third parameter sets
        the number of segments used to approximate a quarter circle (defaults to 8).
(Text from PostGIS documentation at ch. 6.1.3)
"""
return self._topology(capi.geos_buffer(self.ptr, width, quadsegs))
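    # A quick sketch of buffer() (hypothetical values): buffering a point
    # yields an approximately circular Polygon built from 8 segments per
    # quarter circle by default.
    #
    #   >>> circle = GEOSGeometry('POINT(0 0)').buffer(10.0)
    #   >>> circle.geom_type
    #   'Polygon'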
@property
def centroid(self):
"""
The centroid is equal to the centroid of the set of component Geometries
of highest dimension (since the lower-dimension geometries contribute zero
"weight" to the centroid).
"""
return self._topology(capi.geos_centroid(self.ptr))
@property
def convex_hull(self):
"""
Returns the smallest convex Polygon that contains all the points
in the Geometry.
"""
return self._topology(capi.geos_convexhull(self.ptr))
def difference(self, other):
"""
Returns a Geometry representing the points making up this Geometry
that do not make up other.
"""
return self._topology(capi.geos_difference(self.ptr, other.ptr))
@property
def envelope(self):
"Return the envelope for this geometry (a polygon)."
return self._topology(capi.geos_envelope(self.ptr))
def intersection(self, other):
"Returns a Geometry representing the points shared by this Geometry and other."
return self._topology(capi.geos_intersection(self.ptr, other.ptr))
@property
def point_on_surface(self):
"Computes an interior point of this Geometry."
return self._topology(capi.geos_pointonsurface(self.ptr))
def relate(self, other):
"Returns the DE-9IM intersection matrix for this Geometry and the other."
return capi.geos_relate(self.ptr, other.ptr).decode()
def simplify(self, tolerance=0.0, preserve_topology=False):
"""
        Returns the Geometry, simplified using the Douglas-Peucker algorithm
        to the specified tolerance (higher tolerance => fewer points). If no
        tolerance is provided, it defaults to 0.
        By default, this function does not preserve topology - e.g. polygons can
        be split, collapse to lines or disappear. Holes can be created or
        disappear, and lines can cross. By specifying preserve_topology=True,
        the result will have the same dimension and number of components as the
        input. This is significantly slower.
"""
if preserve_topology:
return self._topology(capi.geos_preservesimplify(self.ptr, tolerance))
else:
return self._topology(capi.geos_simplify(self.ptr, tolerance))
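    # A sketch of the topology trade-off described above (the tolerance
    # value is hypothetical):
    #
    #   >>> fast = geom.simplify(0.5)                          # may break rings
    #   >>> safe = geom.simplify(0.5, preserve_topology=True)  # slower, valid result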
def sym_difference(self, other):
"""
Returns a set combining the points in this Geometry not in other,
and the points in other not in this Geometry.
"""
return self._topology(capi.geos_symdifference(self.ptr, other.ptr))
def union(self, other):
"Returns a Geometry representing all the points in this Geometry and other."
return self._topology(capi.geos_union(self.ptr, other.ptr))
# #### Other Routines ####
@property
def area(self):
"Returns the area of the Geometry."
return capi.geos_area(self.ptr, byref(c_double()))
def distance(self, other):
"""
Returns the distance between the closest points on this Geometry
and the other. Units will be in those of the coordinate system of
the Geometry.
"""
if not isinstance(other, GEOSGeometry):
raise TypeError('distance() works only on other GEOS Geometries.')
return capi.geos_distance(self.ptr, other.ptr, byref(c_double()))
@property
def extent(self):
"""
Returns the extent of this geometry as a 4-tuple, consisting of
(xmin, ymin, xmax, ymax).
"""
from .point import Point
env = self.envelope
if isinstance(env, Point):
xmin, ymin = env.tuple
xmax, ymax = xmin, ymin
else:
xmin, ymin = env[0][0]
xmax, ymax = env[0][2]
return (xmin, ymin, xmax, ymax)
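    # A worked example of the 4-tuple (hypothetical geometry):
    #
    #   >>> GEOSGeometry('LINESTRING(0 0, 50 100)').extent
    #   (0.0, 0.0, 50.0, 100.0)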
@property
def length(self):
"""
Returns the length of this Geometry (e.g., 0 for point, or the
circumference of a Polygon).
"""
return capi.geos_length(self.ptr, byref(c_double()))
def clone(self):
"Clones this Geometry."
return GEOSGeometry(capi.geom_clone(self.ptr), srid=self.srid)
class ProjectInterpolateMixin(object):
"""
Used for LineString and MultiLineString.
"""
def interpolate(self, distance):
return self._topology(capi.geos_interpolate(self.ptr, distance))
def interpolate_normalized(self, distance):
return self._topology(capi.geos_interpolate_normalized(self.ptr, distance))
def project(self, point):
from .point import Point
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
return capi.geos_project(self.ptr, point.ptr)
def project_normalized(self, point):
from .point import Point
if not isinstance(point, Point):
raise TypeError('locate_point argument must be a Point')
return capi.geos_project_normalized(self.ptr, point.ptr)
| bsd-3-clause | 6,653,237,889,504,415,000 | 34.035556 | 98 | 0.609244 | false |
hj91/jaikuengine | api/urls.py | 33 | 1686 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^$',
'django.views.generic.simple.redirect_to',
{'url': '/docs'}),
(r'^api(?P<the_rest>/.*)$',
'django.views.generic.simple.redirect_to',
{'url': '%(the_rest)s'}),
(r'^keys$', 'api.views.api_keys'),
(r'^keys/(?P<consumer_key>\w+)$', 'api.views.api_key'),
(r'^key$', 'api.views.api_key_legacy'),
(r'^tokens', 'api.views.api_tokens'),
(r'^docs$', 'api.views.api_docs'),
(r'^docs/(?P<doc>\w+)$', 'api.views.api_doc'),
(r'^json', 'api.views.api_call'),
(r'^loaddata', 'api.views.api_loaddata'),
(r'^cleardata', 'api.views.api_cleardata'),
(r'^request_token', 'api.views.api_request_token'),
(r'^authorize', 'api.views.api_authorize'),
(r'^access_token', 'api.views.api_access_token'),
(r'^sms_receive/(?P<vendor_secret>.*)$', 'api.views.api_vendor_sms_receive'),
(r'^process_queue$', 'api.views.api_vendor_queue_process'),
(r'^xmlrpc', 'api.views.api_xmlrpc'),
)
handler404 = 'common.views.common_404'
handler500 = 'common.views.common_500'
| apache-2.0 | 193,767,705,270,037,950 | 38.209302 | 81 | 0.644721 | false |
amenonsen/ansible | lib/ansible/modules/network/avi/avi_vsvip.py | 28 | 5474 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.2
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vsvip
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of VsVip Avi RESTful Object
description:
- This module is used to configure VsVip object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cloud_ref:
description:
- It is a reference to an object of type cloud.
- Field introduced in 17.1.1.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
- Field introduced in 17.1.1.
east_west_placement:
description:
- Force placement on all service engines in the service engine group (container clouds only).
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
name:
description:
- Name for the vsvip object.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
use_standard_alb:
description:
- This overrides the cloud level default and needs to match the se group value in which it will be used if the se group use_standard_alb value is
- set.
- This is only used when fip is used for vs on azure cloud.
- Field introduced in 18.2.3.
version_added: "2.9"
type: bool
uuid:
description:
- Uuid of the vsvip object.
- Field introduced in 17.1.1.
vip:
description:
- List of virtual service ips and other shareable entities.
- Field introduced in 17.1.1.
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
- Field introduced in 17.1.1.
vsvip_cloud_config_cksum:
description:
- Checksum of cloud configuration for vsvip.
- Internally set by cloud connector.
- Field introduced in 17.2.9, 18.1.2.
version_added: "2.9"
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VsVip object
avi_vsvip:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vsvip
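# A hypothetical companion example (a sketch, not from the module docs):
# the same connection parameters with state: absent remove the object.
- name: Example to delete VsVip object
  avi_vsvip:
    controller: 10.10.25.42
    username: admin
    password: something
    state: absent
    name: sample_vsvip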
"""
RETURN = '''
obj:
description: VsVip (api/vsvip) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cloud_ref=dict(type='str',),
dns_info=dict(type='list',),
east_west_placement=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
use_standard_alb=dict(type='bool',),
uuid=dict(type='str',),
vip=dict(type='list',),
vrf_context_ref=dict(type='str',),
vsvip_cloud_config_cksum=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'vsvip',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | -5,836,861,082,007,775,000 | 33.427673 | 157 | 0.602667 | false |
mauriceatron/iPhone-OpenGL-ES-Tutorial-Series | OpenGLES16/export_gldata.py | 5 | 2088 | #!BPY
import struct
import bpy
import Blender
def newFileName(ext):
return '.'.join(Blender.Get('filename').split('.')[:-1] + [ext])
def saveAllMeshes(filename):
for object in Blender.Object.Get():
if object.getType() == 'Mesh':
mesh = object.getData()
if (len(mesh.verts) > 0):
saveMesh(filename, mesh)
return
def saveMesh(filename, mesh):
    # First, write the header. Keep it simple (KISS).
# vertex count, triangle count, texture count
file = open(filename, "w")
file.write(struct.pack("<I", len(mesh.verts)))
file.write(struct.pack("<H", len(mesh.faces)))
file.write(struct.pack("<H", len(mesh.materials)))
    # Get the textures we need. Store the file names in the data files
    # as simple null terminated strings.
    for mt in mesh.materials:
        mtexList = mt.getTextures()
        img = mtexList[0].tex.getImage()
        name = img.getName()
        # A bare "s" format would pack only a single byte, so size the
        # format to the full name plus a terminating null byte.
        file.write(struct.pack("%ds" % (len(name) + 1), name))
    # Write the camera co-ordinates. This is our starting position in the map
    for object in Blender.Object.Get():
        if object.getType() == 'Camera':
            location = object.getLocation()
            file.write(struct.pack("<fff", *location))
# Write an interleaved array containing vertex co-ordinate, normal, and UV co-ord
for face in mesh.faces:
for i in range(0, 3):
file.write(struct.pack("fff", *face.v[i].co))
file.write(struct.pack("fff", *face.v[i].no))
file.write(struct.pack("ff", *face.uv[i]))
# Write out an index array of materials (textures) for the triangls
for face in mesh.faces:
file.write(struct.pack("<H", face.mat))
# Write the vertex index array. Currently not used but still here for the moment
#for face in mesh.faces:
# assert len(face.v) == 3
# for vertex in face.v:
# file.write(struct.pack("<H", vertex.index))
inEditMode = Blender.Window.EditMode()
if inEditMode:
Blender.Window.EditMode(0)
Blender.Window.FileSelector(saveAllMeshes, "Export for iPhone", newFileName("gldata"))
if inEditMode:
Blender.Window.EditMode(1) | gpl-3.0 | 5,745,829,199,527,983,000 | 32.693548 | 86 | 0.644636 | false |
hongliangzhao/seastar | tests/memcached/test_memcached.py | 25 | 22367 | #!/usr/bin/env python3
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from contextlib import contextmanager
import socket
import struct
import sys
import random
import argparse
import time
import re
import unittest
server_addr = None
call = None
args = None
class TimeoutError(Exception):
pass
@contextmanager
def tcp_connection(timeout=1):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect(server_addr)
def call(msg):
s.send(msg.encode())
return s.recv(16*1024)
yield call
s.close()
def slow(f):
def wrapper(self):
if args.fast:
raise unittest.SkipTest('Slow')
return f(self)
return wrapper
def recv_all(s):
m = b''
while True:
data = s.recv(1024)
if not data:
break
m += data
return m
def tcp_call(msg, timeout=1):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect(server_addr)
s.send(msg.encode())
s.shutdown(socket.SHUT_WR)
data = recv_all(s)
s.close()
return data
def udp_call_for_fragments(msg, timeout=1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.settimeout(timeout)
this_req_id = random.randint(-32768, 32767)
datagram = struct.pack(">hhhh", this_req_id, 0, 1, 0) + msg.encode()
sock.sendto(datagram, server_addr)
messages = {}
n_determined = None
while True:
data, addr = sock.recvfrom(1500)
req_id, seq, n, res = struct.unpack_from(">hhhh", data)
content = data[8:]
        if n_determined and n_determined != n:
            raise Exception('Inconsistent number of total messages, %d and %d' % (n_determined, n))
n_determined = n
        if req_id != this_req_id:
            raise Exception('Invalid request id: %d, expected %d' % (req_id, this_req_id))
        if seq in messages:
            raise Exception('Duplicate message for seq=%d' % seq)
messages[seq] = content
if len(messages) == n:
break
for k, v in sorted(messages.items(), key=lambda e: e[0]):
yield v
sock.close()
def udp_call(msg, **kwargs):
return b''.join(udp_call_for_fragments(msg, **kwargs))
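# For reference, a sketch of the 8-byte memcache UDP frame header packed
# above (big-endian: request id, sequence number, total datagrams,
# reserved):
#
#   >>> struct.pack(">hhhh", 0x1234, 0, 1, 0)
#   b'\x124\x00\x00\x00\x01\x00\x00'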
class MemcacheTest(unittest.TestCase):
def set(self, key, value, flags=0, expiry=0):
self.assertEqual(call('set %s %d %d %d\r\n%s\r\n' % (key, flags, expiry, len(value), value)), b'STORED\r\n')
def delete(self, key):
self.assertEqual(call('delete %s\r\n' % key), b'DELETED\r\n')
def assertHasKey(self, key):
resp = call('get %s\r\n' % key)
if not resp.startswith(('VALUE %s' % key).encode()):
self.fail('Key \'%s\' should be present, but got: %s' % (key, resp.decode()))
def assertNoKey(self, key):
resp = call('get %s\r\n' % key)
if resp != b'END\r\n':
self.fail('Key \'%s\' should not be present, but got: %s' % (key, resp.decode()))
def setKey(self, key):
self.set(key, 'some value')
def getItemVersion(self, key):
m = re.match(r'VALUE %s \d+ \d+ (?P<version>\d+)' % key, call('gets %s\r\n' % key).decode())
return int(m.group('version'))
def getStat(self, name, call_fn=None):
if not call_fn: call_fn = call
resp = call_fn('stats\r\n').decode()
m = re.search(r'STAT %s (?P<value>.+)' % re.escape(name), resp, re.MULTILINE)
return m.group('value')
def flush(self):
self.assertEqual(call('flush_all\r\n'), b'OK\r\n')
def tearDown(self):
self.flush()
class TcpSpecificTests(MemcacheTest):
def test_recovers_from_errors_in_the_stream(self):
with tcp_connection() as conn:
self.assertEqual(conn('get\r\n'), b'ERROR\r\n')
self.assertEqual(conn('get key\r\n'), b'END\r\n')
def test_incomplete_command_results_in_error(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(server_addr)
s.send(b'get')
s.shutdown(socket.SHUT_WR)
self.assertEqual(recv_all(s), b'ERROR\r\n')
s.close()
def test_stream_closed_results_in_error(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(server_addr)
s.shutdown(socket.SHUT_WR)
self.assertEqual(recv_all(s), b'')
s.close()
    def test_unsuccessful_parsing_does_not_leave_data_behind(self):
with tcp_connection() as conn:
self.assertEqual(conn('set key 0 0 5\r\nhello\r\n'), b'STORED\r\n')
self.assertRegexpMatches(conn('delete a b c\r\n'), b'^(CLIENT_)?ERROR.*\r\n$')
self.assertEqual(conn('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n')
self.assertEqual(conn('delete key\r\n'), b'DELETED\r\n')
def test_flush_all_no_reply(self):
self.assertEqual(call('flush_all noreply\r\n'), b'')
def test_set_no_reply(self):
self.assertEqual(call('set key 0 0 5 noreply\r\nhello\r\nget key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n')
self.delete('key')
def test_delete_no_reply(self):
self.setKey('key')
self.assertEqual(call('delete key noreply\r\nget key\r\n'), b'END\r\n')
def test_add_no_reply(self):
self.assertEqual(call('add key 0 0 1 noreply\r\na\r\nget key\r\n'), b'VALUE key 0 1\r\na\r\nEND\r\n')
self.delete('key')
def test_replace_no_reply(self):
self.assertEqual(call('set key 0 0 1\r\na\r\n'), b'STORED\r\n')
self.assertEqual(call('replace key 0 0 1 noreply\r\nb\r\nget key\r\n'), b'VALUE key 0 1\r\nb\r\nEND\r\n')
self.delete('key')
def test_cas_noreply(self):
self.assertNoKey('key')
self.assertEqual(call('cas key 0 0 1 1 noreply\r\na\r\n'), b'')
self.assertNoKey('key')
self.assertEqual(call('add key 0 0 5\r\nhello\r\n'), b'STORED\r\n')
version = self.getItemVersion('key')
self.assertEqual(call('cas key 1 0 5 %d noreply\r\naloha\r\n' % (version + 1)), b'')
self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n')
self.assertEqual(call('cas key 1 0 5 %d noreply\r\naloha\r\n' % (version)), b'')
self.assertEqual(call('get key\r\n'), b'VALUE key 1 5\r\naloha\r\nEND\r\n')
self.delete('key')
@slow
def test_connection_statistics(self):
with tcp_connection() as conn:
curr_connections = int(self.getStat('curr_connections', call_fn=conn))
total_connections = int(self.getStat('total_connections', call_fn=conn))
with tcp_connection() as conn2:
self.assertEquals(curr_connections + 1, int(self.getStat('curr_connections', call_fn=conn)))
self.assertEquals(total_connections + 1, int(self.getStat('total_connections', call_fn=conn)))
self.assertEquals(total_connections + 1, int(self.getStat('total_connections', call_fn=conn)))
time.sleep(0.1)
self.assertEquals(curr_connections, int(self.getStat('curr_connections', call_fn=conn)))
class UdpSpecificTests(MemcacheTest):
def test_large_response_is_split_into_mtu_chunks(self):
max_datagram_size = 1400
data = '1' * (max_datagram_size*3)
self.set('key', data)
chunks = list(udp_call_for_fragments('get key\r\n'))
for chunk in chunks:
self.assertLessEqual(len(chunk), max_datagram_size)
self.assertEqual(b''.join(chunks).decode(),
'VALUE key 0 %d\r\n%s\r\n' \
'END\r\n' % (len(data), data))
self.delete('key')
class TestCommands(MemcacheTest):
def test_basic_commands(self):
self.assertEqual(call('get key\r\n'), b'END\r\n')
self.assertEqual(call('set key 0 0 5\r\nhello\r\n'), b'STORED\r\n')
self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n')
self.assertEqual(call('delete key\r\n'), b'DELETED\r\n')
self.assertEqual(call('delete key\r\n'), b'NOT_FOUND\r\n')
self.assertEqual(call('get key\r\n'), b'END\r\n')
def test_error_handling(self):
self.assertEqual(call('get\r\n'), b'ERROR\r\n')
@slow
def test_expiry(self):
self.assertEqual(call('set key 0 1 5\r\nhello\r\n'), b'STORED\r\n')
self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n')
time.sleep(1)
self.assertEqual(call('get key\r\n'), b'END\r\n')
@slow
def test_expiry_at_epoch_time(self):
expiry = int(time.time()) + 1
self.assertEqual(call('set key 0 %d 5\r\nhello\r\n' % expiry), b'STORED\r\n')
self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n')
time.sleep(2)
self.assertEqual(call('get key\r\n'), b'END\r\n')
def test_multiple_keys_in_get(self):
self.assertEqual(call('set key1 0 0 2\r\nv1\r\n'), b'STORED\r\n')
self.assertEqual(call('set key 0 0 2\r\nv2\r\n'), b'STORED\r\n')
resp = call('get key1 key\r\n')
self.assertRegexpMatches(resp, b'^(VALUE key1 0 2\r\nv1\r\nVALUE key 0 2\r\nv2\r\nEND\r\n)|(VALUE key 0 2\r\nv2\r\nVALUE key1 0 2\r\nv1\r\nEND\r\n)$')
self.delete("key")
self.delete("key1")
def test_flush_all(self):
self.set('key', 'value')
self.assertEqual(call('flush_all\r\n'), b'OK\r\n')
self.assertNoKey('key')
def test_keys_set_after_flush_remain(self):
self.assertEqual(call('flush_all\r\n'), b'OK\r\n')
self.setKey('key')
self.assertHasKey('key')
self.delete('key')
@slow
def test_flush_all_with_timeout_flushes_all_keys_even_those_set_after_flush(self):
self.setKey('key')
self.assertEqual(call('flush_all 2\r\n'), b'OK\r\n')
self.assertHasKey('key')
self.setKey('key2')
time.sleep(2)
self.assertNoKey('key')
self.assertNoKey('key2')
@slow
def test_subsequent_flush_is_merged(self):
self.setKey('key')
self.assertEqual(call('flush_all 2\r\n'), b'OK\r\n') # Can flush in anything between 1-2
self.assertEqual(call('flush_all 4\r\n'), b'OK\r\n') # Can flush in anything between 3-4
time.sleep(2)
self.assertHasKey('key')
self.setKey('key2')
time.sleep(4)
self.assertNoKey('key')
self.assertNoKey('key2')
@slow
def test_immediate_flush_cancels_delayed_flush(self):
self.assertEqual(call('flush_all 2\r\n'), b'OK\r\n')
self.assertEqual(call('flush_all\r\n'), b'OK\r\n')
self.setKey('key')
time.sleep(1)
self.assertHasKey('key')
self.delete('key')
@slow
def test_flushing_in_the_past(self):
self.setKey('key1')
time.sleep(1)
self.setKey('key2')
key2_time = int(time.time())
self.assertEqual(call('flush_all %d\r\n' % (key2_time - 1)), b'OK\r\n')
self.assertNoKey("key1")
self.assertNoKey("key2")
@slow
    def test_memcache_does_not_crash_when_flushing_with_already_expired_items(self):
self.assertEqual(call('set key1 0 2 5\r\nhello\r\n'), b'STORED\r\n')
time.sleep(1)
self.assertEqual(call('flush_all\r\n'), b'OK\r\n')
def test_response_spanning_many_datagrams(self):
key1_data = '1' * 1000
key2_data = '2' * 1000
key3_data = '3' * 1000
self.set('key1', key1_data)
self.set('key2', key2_data)
self.set('key3', key3_data)
resp = call('get key1 key2 key3\r\n').decode()
pattern = '^VALUE (?P<v1>.*?\r\n.*?)\r\nVALUE (?P<v2>.*?\r\n.*?)\r\nVALUE (?P<v3>.*?\r\n.*?)\r\nEND\r\n$'
self.assertRegexpMatches(resp, pattern)
m = re.match(pattern, resp)
self.assertEqual(set([m.group('v1'), m.group('v2'), m.group('v3')]),
set(['key1 0 %d\r\n%s' % (len(key1_data), key1_data),
'key2 0 %d\r\n%s' % (len(key2_data), key2_data),
'key3 0 %d\r\n%s' % (len(key3_data), key3_data)]))
self.delete('key1')
self.delete('key2')
self.delete('key3')
def test_version(self):
self.assertRegexpMatches(call('version\r\n'), b'^VERSION .*\r\n$')
def test_add(self):
self.assertEqual(call('add key 0 0 1\r\na\r\n'), b'STORED\r\n')
self.assertEqual(call('add key 0 0 1\r\na\r\n'), b'NOT_STORED\r\n')
self.delete('key')
def test_replace(self):
self.assertEqual(call('add key 0 0 1\r\na\r\n'), b'STORED\r\n')
self.assertEqual(call('replace key 0 0 1\r\na\r\n'), b'STORED\r\n')
self.delete('key')
self.assertEqual(call('replace key 0 0 1\r\na\r\n'), b'NOT_STORED\r\n')
def test_cas_and_gets(self):
self.assertEqual(call('cas key 0 0 1 1\r\na\r\n'), b'NOT_FOUND\r\n')
self.assertEqual(call('add key 0 0 5\r\nhello\r\n'), b'STORED\r\n')
version = self.getItemVersion('key')
self.assertEqual(call('set key 1 0 5\r\nhello\r\n'), b'STORED\r\n')
self.assertEqual(call('gets key\r\n').decode(), 'VALUE key 1 5 %d\r\nhello\r\nEND\r\n' % (version + 1))
self.assertEqual(call('cas key 0 0 5 %d\r\nhello\r\n' % (version)), b'EXISTS\r\n')
self.assertEqual(call('cas key 0 0 5 %d\r\naloha\r\n' % (version + 1)), b'STORED\r\n')
self.assertEqual(call('gets key\r\n').decode(), 'VALUE key 0 5 %d\r\naloha\r\nEND\r\n' % (version + 2))
self.delete('key')
def test_curr_items_stat(self):
self.assertEquals(0, int(self.getStat('curr_items')))
self.setKey('key')
self.assertEquals(1, int(self.getStat('curr_items')))
self.delete('key')
self.assertEquals(0, int(self.getStat('curr_items')))
def test_how_stats_change_with_different_commands(self):
get_count = int(self.getStat('cmd_get'))
set_count = int(self.getStat('cmd_set'))
flush_count = int(self.getStat('cmd_flush'))
total_items = int(self.getStat('total_items'))
get_misses = int(self.getStat('get_misses'))
get_hits = int(self.getStat('get_hits'))
cas_hits = int(self.getStat('cas_hits'))
cas_badval = int(self.getStat('cas_badval'))
cas_misses = int(self.getStat('cas_misses'))
delete_misses = int(self.getStat('delete_misses'))
delete_hits = int(self.getStat('delete_hits'))
curr_connections = int(self.getStat('curr_connections'))
incr_hits = int(self.getStat('incr_hits'))
incr_misses = int(self.getStat('incr_misses'))
decr_hits = int(self.getStat('decr_hits'))
decr_misses = int(self.getStat('decr_misses'))
call('get key\r\n')
get_count += 1
get_misses += 1
call('gets key\r\n')
get_count += 1
get_misses += 1
call('set key1 0 0 1\r\na\r\n')
set_count += 1
total_items += 1
call('get key1\r\n')
get_count += 1
get_hits += 1
call('add key1 0 0 1\r\na\r\n')
set_count += 1
call('add key2 0 0 1\r\na\r\n')
set_count += 1
total_items += 1
call('replace key1 0 0 1\r\na\r\n')
set_count += 1
total_items += 1
call('replace key3 0 0 1\r\na\r\n')
set_count += 1
call('cas key4 0 0 1 1\r\na\r\n')
set_count += 1
cas_misses += 1
call('cas key1 0 0 1 %d\r\na\r\n' % self.getItemVersion('key1'))
set_count += 1
get_count += 1
get_hits += 1
cas_hits += 1
total_items += 1
call('cas key1 0 0 1 %d\r\na\r\n' % (self.getItemVersion('key1') + 1))
set_count += 1
get_count += 1
get_hits += 1
cas_badval += 1
call('delete key1\r\n')
delete_hits += 1
call('delete key1\r\n')
delete_misses += 1
call('incr num 1\r\n')
incr_misses += 1
call('decr num 1\r\n')
decr_misses += 1
call('set num 0 0 1\r\n0\r\n')
set_count += 1
total_items += 1
call('incr num 1\r\n')
incr_hits += 1
call('decr num 1\r\n')
decr_hits += 1
self.flush()
flush_count += 1
self.assertEquals(get_count, int(self.getStat('cmd_get')))
self.assertEquals(set_count, int(self.getStat('cmd_set')))
self.assertEquals(flush_count, int(self.getStat('cmd_flush')))
self.assertEquals(total_items, int(self.getStat('total_items')))
self.assertEquals(get_hits, int(self.getStat('get_hits')))
self.assertEquals(get_misses, int(self.getStat('get_misses')))
self.assertEquals(cas_misses, int(self.getStat('cas_misses')))
self.assertEquals(cas_hits, int(self.getStat('cas_hits')))
self.assertEquals(cas_badval, int(self.getStat('cas_badval')))
self.assertEquals(delete_misses, int(self.getStat('delete_misses')))
self.assertEquals(delete_hits, int(self.getStat('delete_hits')))
self.assertEquals(0, int(self.getStat('curr_items')))
self.assertEquals(curr_connections, int(self.getStat('curr_connections')))
self.assertEquals(incr_misses, int(self.getStat('incr_misses')))
self.assertEquals(incr_hits, int(self.getStat('incr_hits')))
self.assertEquals(decr_misses, int(self.getStat('decr_misses')))
self.assertEquals(decr_hits, int(self.getStat('decr_hits')))
def test_incr(self):
self.assertEqual(call('incr key 0\r\n'), b'NOT_FOUND\r\n')
self.assertEqual(call('set key 0 0 1\r\n0\r\n'), b'STORED\r\n')
self.assertEqual(call('incr key 0\r\n'), b'0\r\n')
self.assertEqual(call('get key\r\n'), b'VALUE key 0 1\r\n0\r\nEND\r\n')
self.assertEqual(call('incr key 1\r\n'), b'1\r\n')
self.assertEqual(call('incr key 2\r\n'), b'3\r\n')
self.assertEqual(call('incr key %d\r\n' % (pow(2, 64) - 1)), b'2\r\n')
self.assertEqual(call('incr key %d\r\n' % (pow(2, 64) - 3)), b'18446744073709551615\r\n')
self.assertRegexpMatches(call('incr key 1\r\n').decode(), r'0(\w+)?\r\n')
self.assertEqual(call('set key 0 0 2\r\n1 \r\n'), b'STORED\r\n')
self.assertEqual(call('incr key 1\r\n'), b'2\r\n')
self.assertEqual(call('set key 0 0 2\r\n09\r\n'), b'STORED\r\n')
self.assertEqual(call('incr key 1\r\n'), b'10\r\n')
def test_decr(self):
self.assertEqual(call('decr key 0\r\n'), b'NOT_FOUND\r\n')
self.assertEqual(call('set key 0 0 1\r\n7\r\n'), b'STORED\r\n')
self.assertEqual(call('decr key 1\r\n'), b'6\r\n')
self.assertEqual(call('get key\r\n'), b'VALUE key 0 1\r\n6\r\nEND\r\n')
self.assertEqual(call('decr key 6\r\n'), b'0\r\n')
self.assertEqual(call('decr key 2\r\n'), b'0\r\n')
self.assertEqual(call('set key 0 0 2\r\n20\r\n'), b'STORED\r\n')
self.assertRegexpMatches(call('decr key 11\r\n').decode(), r'^9( )?\r\n$')
self.assertEqual(call('set key 0 0 3\r\n100\r\n'), b'STORED\r\n')
self.assertRegexpMatches(call('decr key 91\r\n').decode(), r'^9( )?\r\n$')
self.assertEqual(call('set key 0 0 2\r\n1 \r\n'), b'STORED\r\n')
self.assertEqual(call('decr key 1\r\n'), b'0\r\n')
self.assertEqual(call('set key 0 0 2\r\n09\r\n'), b'STORED\r\n')
self.assertEqual(call('decr key 1\r\n'), b'8\r\n')
def test_incr_and_decr_on_invalid_input(self):
error_msg = b'CLIENT_ERROR cannot increment or decrement non-numeric value\r\n'
for cmd in ['incr', 'decr']:
for value in ['', '-1', 'a', '0x1', '18446744073709551616']:
self.assertEqual(call('set key 0 0 %d\r\n%s\r\n' % (len(value), value)), b'STORED\r\n')
prev = call('get key\r\n')
self.assertEqual(call(cmd + ' key 1\r\n'), error_msg, "cmd=%s, value=%s" % (cmd, value))
self.assertEqual(call('get key\r\n'), prev)
self.delete('key')
def wait_for_memcache_tcp(timeout=4):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
timeout_at = time.time() + timeout
while True:
if time.time() >= timeout_at:
raise TimeoutError()
try:
s.connect(server_addr)
s.close()
break
except ConnectionRefusedError:
time.sleep(0.1)
def wait_for_memcache_udp(timeout=4):
timeout_at = time.time() + timeout
while True:
if time.time() >= timeout_at:
raise TimeoutError()
try:
udp_call('version\r\n', timeout=0.2)
break
except socket.timeout:
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="memcache protocol tests")
    parser.add_argument('--server', '-s', action="store", help="server address in <host>:<port> format", default="localhost:11211")
parser.add_argument('--udp', '-U', action="store_true", help="Use UDP protocol")
parser.add_argument('--fast', action="store_true", help="Run only fast tests")
args = parser.parse_args()
host, port = args.server.split(':')
server_addr = (host, int(port))
if args.udp:
call = udp_call
wait_for_memcache_udp()
else:
call = tcp_call
wait_for_memcache_tcp()
runner = unittest.TextTestRunner()
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(TestCommands))
if args.udp:
suite.addTest(loader.loadTestsFromTestCase(UdpSpecificTests))
else:
suite.addTest(loader.loadTestsFromTestCase(TcpSpecificTests))
result = runner.run(suite)
if not result.wasSuccessful():
sys.exit(1)
| apache-2.0 | -7,158,991,775,894,665,000 | 36.340568 | 158 | 0.590558 | false |
ramcn/demo3 | venv/lib/python3.4/site-packages/pip/index.py | 31 | 44478 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
Inf, cached_property, normalize_name, splitext, normalize_path)
from pip.utils.deprecation import RemovedInPip7Warning, RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip.req.req_requirement import InstallationCandidate
from pip._vendor import html5lib, requests, pkg_resources, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['PackageFinder']
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
SECURE_ORIGINS = [
# protocol, hostname, port
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
]
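# For instance (illustrative, not from the source), the entry
# ("*", "127.0.0.0/8", "*") above makes any scheme and port on a loopback
# address such as http://127.0.0.1:8080/simple/ count as a secure origin.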
logger = logging.getLogger(__name__)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=(), allow_unverified=(),
allow_all_external=False, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None):
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip7Warning,
)
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
        Note: it was considered to embed this logic into the Link
        comparison operators, but then different sdist links
        with the same version would have to be considered equal
"""
if self.use_wheel:
support_num = len(supported_tags)
if candidate.location == INSTALLED_VERSION:
pri = 1
elif candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
else:
return candidate.version
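    # A sketch of the resulting ordering (versions and support indices are
    # hypothetical): with use_wheel enabled, candidates compare as
    # (version, priority) tuples, e.g.
    #
    #   ('2.0', -1)   # wheel whose best tag sits at support index 1
    #   ('2.0', -33)  # sdist: -len(supported_tags)
    #   ('1.9', 1)    # already-installed version
    #
    # so sorting in reverse prefers newer versions first, then installs
    # and wheels over sdists within a version.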
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
        existing ordering as secondary. See the docstring for
        `_candidate_sort_key` for details. This function is isolated for
        easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
# Check to see if the protocol matches
if origin[0] != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if origin[1] != secure_origin[1] and secure_origin[1] != "*":
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
            # Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should break out of the loop and continue
# on.
break
else:
# If the loop successfully completed without a break, that means
# that the origin we are testing is not a secure origin.
logger.warning(
"This repository located at %s is not a trusted host, if "
"this repository is available via HTTPS it is recommend to "
"use HTTPS instead, otherwise you may silence this warning "
"with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
warnings.warn(
"Implicitly allowing locations which are not hosted at a "
"secure origin is deprecated and will require the use of "
"--trusted-host in the future.",
RemovedInPip7Warning,
)
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, project_url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
project_url_name = urllib_parse.quote(project_name.lower())
if self.index_urls:
# Check that we have the url_name correctly spelled:
# Only check main index if index URL is given
main_index_url = Link(
mkurl_pypi_url(self.index_urls[0]),
trusted=True,
)
page = self._get_page(main_index_url)
if page is None and PyPI.netloc not in str(main_index_url):
warnings.warn(
"Failed to find %r at %s. It is suggested to upgrade "
"your index to support normalized names as the name in "
"/simple/{name}." % (project_name, main_index_url),
RemovedInPip8Warning,
)
project_url_name = self._find_url_name(
Link(self.index_urls[0], trusted=True),
project_url_name,
) or project_url_name
if project_url_name is not None:
return [mkurl_pypi_url(url) for url in self.index_urls]
return []
def _find_all_versions(self, project_name):
"""Find all available versions for project_name
This checks index_urls, find_links and dependency_links
All versions found are returned
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
file_locations, url_locations = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(self.find_links)
file_locations.extend(fl_file_loc)
url_locations.extend(fl_url_loc)
_flocations, _ulocations = self._sort_locations(self.dependency_links)
file_locations.extend(_flocations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
# We explicitly do not trust links that came from dependency_links
locations.extend([Link(url) for url in _ulocations])
logger.debug('%d location(s) to search for versions of %s:',
len(locations), project_name)
for location in locations:
logger.debug('* %s', location)
self._validate_secure_origin(logger, location)
find_links_versions = list(self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f', trusted=True) for url in self.find_links),
project_name.lower()
))
page_versions = []
for page in self._get_pages(locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, project_name.lower())
)
dependency_versions = list(self._package_versions(
(Link(url) for url in self.dependency_links), project_name.lower()
))
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = list(
self._package_versions(
(Link(url) for url in file_locations),
project_name.lower()
)
)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find an InstallationCandidate for req
Expects req, an InstallRequirement and upgrade, a boolean
Returns an InstallationCandidate or None
May raise DistributionNotFound or BestVersionAlreadyInstalled
"""
all_versions = self._find_all_versions(req.name)
# Filter out anything which doesn't match our specifier
_versions = set(
req.specifier.filter(
[x.version for x in all_versions],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_versions = [
x for x in all_versions if x.version in _versions
]
if req.satisfied_by is not None:
# Finally add our existing versions to the front of our versions.
applicable_versions.insert(
0,
InstallationCandidate(
req.name,
req.satisfied_by.version,
INSTALLED_VERSION,
)
)
existing_applicable = True
else:
existing_applicable = False
applicable_versions = self._sort_versions(applicable_versions)
if not upgrade and existing_applicable:
if applicable_versions[0].location is INSTALLED_VERSION:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
req.satisfied_by.version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
req.satisfied_by.version,
applicable_versions[0][2],
)
return None
if not applicable_versions:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(i.version) for i in all_versions),
key=parse_version,
)
)
)
if self.need_warn_external:
logger.warning(
"Some externally hosted files were ignored as access to "
"them may be unreliable (use --allow-external %s to "
"allow).",
req.name,
)
if self.need_warn_unverified:
logger.warning(
"Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow).",
req.name,
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
if applicable_versions[0].location is INSTALLED_VERSION:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
req.satisfied_by.version,
', '.join(str(i.version) for i in applicable_versions[1:]) or
"none",
)
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.debug(
'Using version %s (newest of versions: %s)',
applicable_versions[0].version,
', '.join(str(i.version) for i in applicable_versions)
)
selected_version = applicable_versions[0].location
if (selected_version.verifiable is not None and not
selected_version.verifiable):
logger.warning(
"%s is potentially insecure and unverifiable.", req.name,
)
if selected_version._deprecated_regex:
warnings.warn(
"%s discovered using a deprecated method of parsing, in the "
"future it will no longer be discovered." % req.name,
RemovedInPip7Warning,
)
return selected_version
def _find_url_name(self, index_url, url_name):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url)
if page is None:
logger.critical('Cannot fetch index base URL %s', index_url)
return
norm_name = normalize_name(url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.debug(
'Real name of requirement %s is %s', url_name, base,
)
return base
return None
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links
"""
all_locations = list(locations)
seen = set()
normalized = normalize_name(project_name)
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
for link in page.rel_links():
if (normalized not in self.allow_external and not
self.allow_all_external):
self.need_warn_external = True
logger.debug(
"Not searching %s for files because external "
"urls are disallowed.",
link,
)
continue
if (link.trusted is not None and not
link.trusted and
normalized not in self.allow_unverified):
logger.debug(
"Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files.",
link,
)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
v = self._link_package_versions(link, search_name)
if v is not None:
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""Return an InstallationCandidate or None"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file', link)
self.logged_links.add(link)
return
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug(
'Skipping link %s; unknown archive format: %s',
link,
ext,
)
self.logged_links.add(link)
return
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one', link)
self.logged_links.add(link)
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
logger.debug(
'Skipping %s because the wheel filename is invalid',
link
)
return
if (pkg_resources.safe_name(wheel.name).lower() !=
pkg_resources.safe_name(search_name).lower()):
logger.debug(
'Skipping link %s; wrong project name (not %s)',
link,
search_name,
)
return
if not wheel.supported():
logger.debug(
'Skipping %s because it is not compatible with this '
'Python',
link,
)
return
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win') and not
platform.startswith('macosx') and not
platform == 'cli'
) and
comes_from is not None and
urllib_parse.urlparse(
comes_from.url
).netloc.endswith(PyPI.netloc)):
if not wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform",
link,
)
return
version = wheel.version
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug(
'Skipping link %s; wrong project name (not %s)',
link,
search_name,
)
return
if (link.internal is not None and not
link.internal and not
normalize_name(search_name).lower()
in self.allow_external and not
self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted.", link)
self.need_warn_external = True
return
if (link.verifiable is not None and not
link.verifiable and not
(normalize_name(search_name).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug(
"Skipping %s because it is an insecure and unverifiable file.",
link,
)
self.need_warn_unverified = True
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug(
'Skipping %s because Python version is incorrect', link
)
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search_name, version, link)
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
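    # A worked example of the matching above (hypothetical filename): for
    # egg_info 'pip-6.0.8' and search_name 'pip', the regex matches
    # 'pip-6.0.8', look_for becomes 'pip-', and the returned version
    # fragment is '6.0.8'.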
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
class HTMLPage(object):
"""Represents one page, along with its URL"""
# FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(b'<th>\\s*home\\s*page', re.I)
_download_re = re.compile(b'<th>\\s*download\\s+url', re.I)
_href_re = re.compile(
b'href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))',
re.I | re.S
)
def __init__(self, content, url, headers=None, trusted=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
            # requirement of a url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(
resp.content, resp.url, resp.headers,
trusted=link.trusted,
)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(link, exc, url, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, level=2, meth=logger.info)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, level=1, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def api_version(self):
metas = [
x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"
]
if metas:
try:
return int(metas[0].get("value", None))
except (TypeError, ValueError):
pass
return None
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(
anchor.get("rel") and
"internal" in anchor.get("rel").split()
)
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = (
href_match.group(1) or
href_match.group(2) or
href_match.group(3)
)
if not url:
continue
try:
url = url.decode("ascii")
except UnicodeDecodeError:
continue
url = self.clean_link(urllib_parse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
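    # A quick sketch of clean_link (illustrative, doctest-style):
    #   clean_link('http://example.com/some file.tgz')
    #     -> 'http://example.com/some%20file.tgz'
    # Already-encoded sequences such as '%20' are left untouched, since '%'
    # is in the allowed character set of _clean_re.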
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None,
_deprecated_regex=False):
# url can be a UNC windows share
if url != Inf and url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
self._deprecated_regex = _deprecated_regex
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
                # This link is either trusted, or it came from a trusted
                # source; however, it is not operating under API version 2,
                # so we can't make any claims about whether it's safe or not.
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
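    # Summary of the verifiability rules above (illustrative):
    #   trusted source, api_version <= 1 or unknown -> None (no claim either way)
    #   trusted source, api_version >= 2, has hash  -> True
    #   trusted source, api_version >= 2, no hash   -> False
    #   explicitly untrusted source                 -> False
    #   trust unknown (trusted is None)             -> None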
@property
def is_wheel(self):
return self.ext == wheel_ext
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
| mit | -2,632,420,606,462,336,000 | 36.003328 | 79 | 0.53015 | false |
elijah513/scrapy | scrapy/extensions/throttle.py | 142 | 3586 | import logging
from scrapy.exceptions import NotConfigured
from scrapy import signals
logger = logging.getLogger(__name__)
class AutoThrottle(object):
def __init__(self, crawler):
self.crawler = crawler
if not crawler.settings.getbool('AUTOTHROTTLE_ENABLED'):
raise NotConfigured
self.debug = crawler.settings.getbool("AUTOTHROTTLE_DEBUG")
self.target_concurrency = crawler.settings.getfloat("AUTOTHROTTLE_TARGET_CONCURRENCY")
crawler.signals.connect(self._spider_opened, signal=signals.spider_opened)
crawler.signals.connect(self._response_downloaded, signal=signals.response_downloaded)
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def _spider_opened(self, spider):
self.mindelay = self._min_delay(spider)
self.maxdelay = self._max_delay(spider)
spider.download_delay = self._start_delay(spider)
def _min_delay(self, spider):
s = self.crawler.settings
return getattr(spider, 'download_delay', s.getfloat('DOWNLOAD_DELAY'))
def _max_delay(self, spider):
return self.crawler.settings.getfloat('AUTOTHROTTLE_MAX_DELAY')
def _start_delay(self, spider):
return max(self.mindelay, self.crawler.settings.getfloat('AUTOTHROTTLE_START_DELAY'))
def _response_downloaded(self, response, request, spider):
key, slot = self._get_slot(request, spider)
latency = request.meta.get('download_latency')
if latency is None or slot is None:
return
olddelay = slot.delay
self._adjust_delay(slot, latency, response)
if self.debug:
diff = slot.delay - olddelay
size = len(response.body)
conc = len(slot.transferring)
logger.info(
"slot: %(slot)s | conc:%(concurrency)2d | "
"delay:%(delay)5d ms (%(delaydiff)+d) | "
"latency:%(latency)5d ms | size:%(size)6d bytes",
{
'slot': key, 'concurrency': conc,
'delay': slot.delay * 1000, 'delaydiff': diff * 1000,
'latency': latency * 1000, 'size': size
},
extra={'spider': spider}
)
def _get_slot(self, request, spider):
key = request.meta.get('download_slot')
return key, self.crawler.engine.downloader.slots.get(key)
def _adjust_delay(self, slot, latency, response):
"""Define delay adjustment policy"""
# If a server needs `latency` seconds to respond then
# we should send a request each `latency/N` seconds
# to have N requests processed in parallel
target_delay = latency / self.target_concurrency
# Adjust the delay to make it closer to target_delay
new_delay = (slot.delay + target_delay) / 2.0
# If target delay is bigger than old delay, then use it instead of mean.
# It works better with problematic sites.
new_delay = max(target_delay, new_delay)
        # Make sure self.mindelay <= new_delay <= self.maxdelay
new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
        # Don't adjust the delay if response status != 200 and the new delay
        # is smaller than the old one, as error pages (and redirections) are
        # usually small and so tend to reduce latency, which would otherwise
        # create a positive feedback loop that keeps lowering the delay.
if response.status != 200 and new_delay <= slot.delay:
return
slot.delay = new_delay
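        # Worked example (illustrative numbers): with
        # AUTOTHROTTLE_TARGET_CONCURRENCY = 2, latency = 1.0s, slot.delay = 0.2s:
        #   target_delay = 1.0 / 2 = 0.5
        #   new_delay = (0.2 + 0.5) / 2 = 0.35, then max(0.5, 0.35) = 0.5
        # and finally clamped to [self.mindelay, self.maxdelay].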
| bsd-3-clause | 3,356,887,223,113,517,000 | 37.55914 | 94 | 0.622142 | false |
caasiu/xbmc-addons-chinese | service.subtitles.subhd/service.py | 1 | 11567 | # -*- coding: utf-8 -*-
import re
import os
import sys
import xbmc
import urllib
import xbmcvfs
import xbmcaddon
import xbmcgui,xbmcplugin
from bs4 import BeautifulSoup
import requests
import simplejson
__addon__ = xbmcaddon.Addon()
__author__ = __addon__.getAddonInfo('author')
__scriptid__ = __addon__.getAddonInfo('id')
__scriptname__ = __addon__.getAddonInfo('name')
__version__ = __addon__.getAddonInfo('version')
__language__ = __addon__.getLocalizedString
__cwd__ = xbmc.translatePath( __addon__.getAddonInfo('path') ).decode("utf-8")
__profile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode("utf-8")
__resource__ = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ) ).decode("utf-8")
__temp__ = xbmc.translatePath( os.path.join( __profile__, 'temp') ).decode("utf-8")
sys.path.append (__resource__)
SUBHD_API = 'http://subhd.com/search/%s'
SUBHD_BASE = 'http://subhd.com'
UserAgent = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)'
def log(module, msg):
xbmc.log((u"%s::%s - %s" % (__scriptname__,module,msg,)).encode('utf-8'),level=xbmc.LOGDEBUG )
def normalizeString(str):
return str
def session_get(url, id='', referer=''):
if id:
HEADERS={'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Host': 'subhd.com',
'Origin': 'http://subhd.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0'}
s = requests.Session()
s.headers.update(HEADERS)
r = s.get(referer)
s.headers.update({'Referer': referer})
r = s.post(url, data={'sub_id': id})
return r.content
else:
HEADERS={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0'}
s = requests.Session()
s.headers.update(HEADERS)
r = s.get(url)
return r.content
def Search( item ):
subtitles_list = []
log(sys._getframe().f_code.co_name, "Search for [%s] by name" % (os.path.basename( item['file_original_path'] ),))
if item['mansearch']:
search_string = item['mansearchstr']
elif len(item['tvshow']) > 0:
search_string = "%s S%.2dE%.2d" % (item['tvshow'],
int(item['season']),
int(item['episode']))
else:
search_string = item['title']
url = SUBHD_API % (urllib.quote(search_string))
data = session_get(url)
try:
soup = BeautifulSoup(data, "html.parser")
except:
return
results = soup.find_all("div", class_="box")
# if can't find subtitle for the specified episode, try the whole season instead
if (len(results) == 0) and (len(item['tvshow']) > 0):
search_string = "%s S%.2d" % (item['tvshow'], int(item['season']))
url = SUBHD_API % (urllib.quote(search_string))
data = session_get(url)
try:
soup = BeautifulSoup(data, "html.parser")
except:
return
results = [x for x in soup.find_all("div", class_="box") if x.find('div', class_='tvlist')]
for it in results:
link = SUBHD_BASE + it.find("div", class_="d_title").a.get('href').encode('utf-8')
version = it.find("div", class_="d_title").a.get('title').encode('utf-8')
        if version.find('本字幕按 ') == 0:  # drop the site's leading '本字幕按' ("this subtitle is based on ...") prefix
            version = version.split()[1]
try:
group = it.find("div", class_="d_zu").text.encode('utf-8')
if group.isspace():
group = ''
except:
group = ''
if group and (version.find(group) == -1):
version += ' ' + group
try:
r2 = it.find_all("span", class_="label")
langs = [x.text.encode('utf-8') for x in r2][:-1]
        except:
            langs = ['未知']  # "unknown"; kept as a list so ",".join(langs) below works
name = '%s (%s)' % (version, ",".join(langs))
        # '英文' = English, '简体' = Simplified Chinese, '繁体' = Traditional Chinese
        if ('英文' in langs) and not(('简体' in langs) or ('繁体' in langs)):
subtitles_list.append({"language_name":"English", "filename":name, "link":link, "language_flag":'en', "rating":"0", "lang":langs})
else:
subtitles_list.append({"language_name":"Chinese", "filename":name, "link":link, "language_flag":'zh', "rating":"0", "lang":langs})
if subtitles_list:
for it in subtitles_list:
listitem = xbmcgui.ListItem(label=it["language_name"],
label2=it["filename"],
iconImage=it["rating"],
thumbnailImage=it["language_flag"]
)
listitem.setProperty( "sync", "false" )
listitem.setProperty( "hearing_imp", "false" )
url = "plugin://%s/?action=download&link=%s&lang=%s" % (__scriptid__,
it["link"],
it["lang"]
)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=listitem,isFolder=False)
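# Illustrative search flow (hypothetical values): for tvshow='Friends',
# season=1, episode=1 the search string becomes 'Friends S01E01', so the
# request goes to SUBHD_API % urllib.quote('Friends S01E01'), i.e.
# 'http://subhd.com/search/Friends%20S01E01'.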
def rmtree(path):
if isinstance(path, unicode):
path = path.encode('utf-8')
dirs, files = xbmcvfs.listdir(path)
for dir in dirs:
rmtree(os.path.join(path, dir))
for file in files:
xbmcvfs.delete(os.path.join(path, file))
xbmcvfs.rmdir(path)
def Download(url,lang):
try: rmtree(__temp__)
except: pass
try: os.makedirs(__temp__)
except: pass
referer = url
subtitle_list = []
exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass" ]
try:
data = session_get(url)
soup = BeautifulSoup(data, "html.parser")
id = soup.find("button", class_="btn btn-danger btn-sm").get("sid").encode('utf-8')
url = "http://subhd.com/ajax/down_ajax"
data = session_get(url, id=id, referer=referer)
json_response = simplejson.loads(data)
if json_response['success']:
url = json_response['url'].replace(r'\/','/').decode("unicode-escape").encode('utf-8')
            if url[:4] != 'http':
url = 'http://subhd.com%s' % (url)
log(sys._getframe().f_code.co_name, "Downloading %s" % (url.decode('utf-8')))
data = session_get(url)
else:
msg = json_response['msg'].decode("unicode-escape")
xbmc.executebuiltin((u'XBMC.Notification("subhd","%s")' % (msg)).encode('utf-8'), True)
data = ''
except:
log(sys._getframe().f_code.co_name, "%s (%d) [%s]" % (
sys.exc_info()[2].tb_frame.f_code.co_name,
sys.exc_info()[2].tb_lineno,
sys.exc_info()[1]
))
return []
if len(data) < 1024:
return []
zip = os.path.join(__temp__, "subtitles%s" % os.path.splitext(url)[1])
with open(zip, "wb") as subFile:
subFile.write(data)
subFile.close()
xbmc.sleep(500)
if data[:4] == 'Rar!' or data[:2] == 'PK':
xbmc.executebuiltin(('XBMC.Extract("%s","%s")' % (zip,__temp__,)).encode('utf-8'), True)
path = __temp__
dirs, files = xbmcvfs.listdir(path)
if ('__MACOSX') in dirs:
dirs.remove('__MACOSX')
if len(dirs) > 0:
path = os.path.join(__temp__, dirs[0].decode('utf-8'))
dirs, files = xbmcvfs.listdir(path)
list = []
for subfile in files:
if (os.path.splitext( subfile )[1] in exts):
list.append(subfile.decode('utf-8'))
if len(list) == 1:
subtitle_list.append(os.path.join(path, list[0]))
elif len(list) > 1:
sel = xbmcgui.Dialog().select('请选择压缩包中的字幕', list)
if sel == -1:
sel = 0
subtitle_list.append(os.path.join(path, list[sel]))
return subtitle_list
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=paramstring
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
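# Example (hypothetical plugin invocation): with
#   sys.argv[2] == '?action=search&languages=en'
# get_params() returns {'action': 'search', 'languages': 'en'}.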
params = get_params()
if params['action'] == 'search' or params['action'] == 'manualsearch':
item = {}
item['temp'] = False
item['rar'] = False
item['mansearch'] = False
item['year'] = xbmc.getInfoLabel("VideoPlayer.Year") # Year
item['season'] = str(xbmc.getInfoLabel("VideoPlayer.Season")) # Season
item['episode'] = str(xbmc.getInfoLabel("VideoPlayer.Episode")) # Episode
item['tvshow'] = normalizeString(xbmc.getInfoLabel("VideoPlayer.TVshowtitle")) # Show
item['title'] = normalizeString(xbmc.getInfoLabel("VideoPlayer.OriginalTitle")) # try to get original title
item['file_original_path'] = urllib.unquote(xbmc.Player().getPlayingFile().decode('utf-8')) # Full path of a playing file
item['3let_language'] = []
if 'searchstring' in params:
item['mansearch'] = True
item['mansearchstr'] = params['searchstring']
for lang in urllib.unquote(params['languages']).decode('utf-8').split(","):
item['3let_language'].append(xbmc.convertLanguage(lang,xbmc.ISO_639_2))
if item['title'] == "":
item['title'] = xbmc.getInfoLabel("VideoPlayer.Title") # no original title, get just Title
if item['title'] == os.path.basename(xbmc.Player().getPlayingFile()): # get movie title and year if is filename
title, year = xbmc.getCleanMovieTitle(item['title'])
item['title'] = normalizeString(title.replace('[','').replace(']',''))
item['year'] = year
if item['episode'].lower().find("s") > -1: # Check if season is "Special"
        item['season'] = "0"
item['episode'] = item['episode'][-1:]
if ( item['file_original_path'].find("http") > -1 ):
item['temp'] = True
elif ( item['file_original_path'].find("rar://") > -1 ):
item['rar'] = True
item['file_original_path'] = os.path.dirname(item['file_original_path'][6:])
elif ( item['file_original_path'].find("stack://") > -1 ):
stackPath = item['file_original_path'].split(" , ")
item['file_original_path'] = stackPath[0][8:]
Search(item)
elif params['action'] == 'download':
subs = Download(params["link"], params["lang"])
for sub in subs:
listitem = xbmcgui.ListItem(label=sub)
xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=sub,listitem=listitem,isFolder=False)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 | 4,326,467,380,122,474,000 | 40.442446 | 142 | 0.537106 | false |
jessefeinman/FintechHackathon | venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/request.py | 780 | 2128 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
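# One more usage sketch in the spirit of the docstring examples above
# (the credentials are illustrative):
#   make_headers(basic_auth='user:pass')
#   -> {'authorization': 'Basic dXNlcjpwYXNz'}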
| bsd-2-clause | -1,862,135,929,985,836,800 | 28.555556 | 85 | 0.613252 | false |
rpm-software-management/yum | test/simpleremovetests.py | 11 | 4749 | from testbase import *
class SimpleRemoveTests(OperationsTests):
@staticmethod
def buildPkgs(pkgs, *args):
pkgs.leaf = FakePackage('foo', '2.5', '1.1', '0', 'noarch')
pkgs.leaf.addFile('/bin/foo')
pkgs.requires_leaf = FakePackage('bar', '4')
pkgs.requires_leaf.addRequires('foo')
pkgs.requires_file = FakePackage('barkeeper', '0.8')
pkgs.requires_file.addRequires('/bin/foo')
pkgs.rr_leaf = FakePackage('baz', '5.3')
pkgs.rr_leaf.addRequires('bar')
pkgs.provides_leaf = FakePackage('foo-ng', '2.5')
pkgs.provides_leaf.addProvides('foo')
def testRemoveSingle(self):
p = self.pkgs
res, msg = self.runOperation(['remove', 'foo'], [p.leaf], [])
self.assert_(res=='ok', msg)
self.assertResult( () )
def testRemoveRequired(self):
p = self.pkgs
res, msg = self.runOperation(['remove', 'foo'], [p.leaf, p.requires_leaf], [])
self.assert_(res=='ok', msg)
self.assertResult( () )
def testRemoveRequiredMissing(self):
p = self.pkgs
res, msg = self.runOperation(['remove', 'bar'], [p.requires_leaf], [])
self.assert_(res=='ok', msg)
self.assertResult( () )
def testRemoveRequiredProvided(self):
p = self.pkgs
res, msg = self.runOperation(['remove', 'foo'], [p.leaf, p.requires_leaf, p.provides_leaf], [])
self.assert_(res=='ok', msg)
self.assertResult( (p.requires_leaf, p.provides_leaf) )
def testRemoveRequiredAvailable(self):
p = self.pkgs
res, msg = self.runOperation(['remove', 'foo'], [p.leaf, p.requires_leaf], [p.provides_leaf])
self.assert_(res=='ok', msg)
self.assertResult( () )
def testRemoveRequiredChain(self):
p = self.pkgs
res, msg = self.runOperation(['remove', 'foo'], [p.leaf, p.requires_leaf, p.rr_leaf], [])
self.assert_(res=='ok', msg)
self.assertResult( () )
def testRemoveRequiredFile(self):
p = self.pkgs
res, msg = self.runOperation(['remove', 'foo'], [p.leaf, p.requires_file], [])
self.assert_(res=='ok', msg)
self.assertResult( () )
def testShellUpRm1(self):
""" Do an update for a package, and then rm it. """
pi1 = FakePackage('foo', '1', '1', '0', 'x86_64')
pa1 = FakePackage('foo', '2', '1', '0', 'x86_64')
res, msg = self.runOperation((['update', 'foo'],
['remove', 'foo'],
),
[pi1],
[pa1], multi_cmds=True)
self.assert_(res=='ok', msg)
self.assertResult(())
def testShellUpRm2(self):
""" Do an update for a package, and then rm it. """
pi1 = FakePackage('foo', '1', '1', '0', 'x86_64')
pi2 = FakePackage('foo', '1', '1', '0', 'i686')
pa1 = FakePackage('foo', '2', '1', '0', 'x86_64')
pa2 = FakePackage('foo', '2', '1', '0', 'i686')
res, msg = self.runOperation((['update', 'foo'],
['remove', 'foo.i686'],
),
[pi1, pi2],
[pa1, pa2], multi_cmds=True)
self.assert_(res=='ok', msg)
self.assertResult((pi1, ))
def testShellUpRm3(self):
""" Do an update for a package, and then rm it. """
pi1 = FakePackage('foo', '1', '1', '0', 'x86_64')
pi2 = FakePackage('foo', '1', '1', '0', 'i686')
pa1 = FakePackage('foo', '2', '1', '0', 'x86_64')
pa2 = FakePackage('foo', '2', '1', '0', 'i686')
res, msg = self.runOperation((['update', 'foo'],
['remove', 'foo.x86_64'],
),
[pi1, pi2],
[pa1, pa2], multi_cmds=True)
self.assert_(res=='ok', msg)
self.assertResult((pi2, ))
def testShellUpRm4(self):
""" Do an update for a package, and then rm it. """
pi1 = FakePackage('foo', '1', '1', '0', 'x86_64')
pi2 = FakePackage('foo', '1', '1', '0', 'i686')
pa1 = FakePackage('foo', '2', '1', '0', 'x86_64')
pa2 = FakePackage('foo', '2', '1', '0', 'i686')
res, msg = self.runOperation((['update', 'foo-2-1'],
['remove', 'foo.i686'],
),
[pi1, pi2],
[pa1, pa2], multi_cmds=True)
self.assert_(res=='ok', msg)
self.assertResult((pi1,))
| gpl-2.0 | 3,572,194,663,335,996,000 | 36.992 | 103 | 0.473363 | false |
highco-groupe/odoo | addons/procurement/procurement.py | 44 | 15869 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from psycopg2 import OperationalError
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
import openerp
PROCUREMENT_PRIORITIES = [('0', 'Not urgent'), ('1', 'Normal'), ('2', 'Urgent'), ('3', 'Very Urgent')]
class procurement_group(osv.osv):
'''
The procurement group class is used to group products together
when computing procurements. (tasks, physical products, ...)
    The goal is that when you have one sale order with several products,
    pulled from the same location or from several locations, the moves stay
    grouped into pickings that represent the sale order.
Used in: sales order (to group delivery order lines like the so), pull/push
rules (to pack like the delivery order), on orderpoints (e.g. for wave picking
all the similar products together).
Grouping is made only if the source and the destination is the same.
    Suppose you have 4 lines on a picking from Output, where 2 lines need to
    come from Input (crossdock) and 2 lines from Stock -> Output. As the four
    procurement orders will have the same group id from the SO, the move from
    Input will have a stock.picking with 2 grouped lines and the move from
    Stock will have 2 grouped lines as well.
The name is usually the name of the original document (sale order) or a
sequence computed if created manually.
'''
_name = 'procurement.group'
_description = 'Procurement Requisition'
_order = "id desc"
_columns = {
'name': fields.char('Reference', required=True),
'move_type': fields.selection([
('direct', 'Partial'), ('one', 'All at once')],
'Delivery Method', required=True),
'procurement_ids': fields.one2many('procurement.order', 'group_id', 'Procurements'),
}
_defaults = {
'name': lambda self, cr, uid, c: self.pool.get('ir.sequence').get(cr, uid, 'procurement.group') or '',
'move_type': lambda self, cr, uid, c: 'direct'
}
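# Minimal usage sketch (old-style ORM API; the values are illustrative):
#   group_id = self.pool.get('procurement.group').create(
#       cr, uid, {'name': 'SO0042', 'move_type': 'one'}, context=context)
# Procurements sharing this group_id will have their moves grouped into the
# same picking when source and destination match.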
class procurement_rule(osv.osv):
'''
    A rule describes what a procurement should do: produce, buy, move, ...
'''
_name = 'procurement.rule'
_description = "Procurement Rule"
_order = "name"
def _get_action(self, cr, uid, context=None):
return []
_columns = {
'name': fields.char('Name', required=True,
help="This field will fill the packing origin and the name of its moves"),
'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
'group_propagation_option': fields.selection([('none', 'Leave Empty'), ('propagate', 'Propagate'), ('fixed', 'Fixed')], string="Propagation of Procurement Group"),
'group_id': fields.many2one('procurement.group', 'Fixed Procurement Group'),
'action': fields.selection(selection=lambda s, cr, uid, context=None: s._get_action(cr, uid, context=context),
string='Action', required=True),
'sequence': fields.integer('Sequence'),
'company_id': fields.many2one('res.company', 'Company'),
}
_defaults = {
'group_propagation_option': 'propagate',
'sequence': 20,
'active': True,
}
class procurement_order(osv.osv):
"""
Procurement Orders
"""
_name = "procurement.order"
_description = "Procurement"
_order = 'priority desc, date_planned, id asc'
_inherit = ['mail.thread']
_log_create = False
_columns = {
'name': fields.text('Description', required=True),
'origin': fields.char('Source Document',
help="Reference of the document that created this Procurement.\n"
"This is automatically completed by Odoo."),
'company_id': fields.many2one('res.company', 'Company', required=True),
        # These two fields are used for scheduling
'priority': fields.selection(PROCUREMENT_PRIORITIES, 'Priority', required=True, select=True, track_visibility='onchange'),
'date_planned': fields.datetime('Scheduled Date', required=True, select=True, track_visibility='onchange'),
'group_id': fields.many2one('procurement.group', 'Procurement Group'),
'rule_id': fields.many2one('procurement.rule', 'Rule', track_visibility='onchange', help="Chosen rule for the procurement resolution. Usually chosen by the system but can be manually set by the procurement manager to force an unusual behavior."),
'product_id': fields.many2one('product.product', 'Product', required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'confirmed': [('readonly', False)]}, readonly=True),
'product_uos_qty': fields.float('UoS Quantity', states={'confirmed': [('readonly', False)]}, readonly=True),
'product_uos': fields.many2one('product.uom', 'Product UoS', states={'confirmed': [('readonly', False)]}, readonly=True),
'state': fields.selection([
('cancel', 'Cancelled'),
('confirmed', 'Confirmed'),
('exception', 'Exception'),
('running', 'Running'),
('done', 'Done')
], 'Status', required=True, track_visibility='onchange', copy=False),
}
_defaults = {
'state': 'confirmed',
'priority': '1',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c)
}
def unlink(self, cr, uid, ids, context=None):
procurements = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in procurements:
if s['state'] == 'cancel':
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'),
_('Cannot delete Procurement Order(s) which are in %s state.') % s['state'])
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def do_view_procurements(self, cr, uid, ids, context=None):
'''
This function returns an action that display existing procurement orders
of same procurement group of given ids.
'''
act_obj = self.pool.get('ir.actions.act_window')
action_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'procurement.do_view_procurements', raise_if_not_found=True)
result = act_obj.read(cr, uid, [action_id], context=context)[0]
group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
return result
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM and UoS of changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
w = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {
'product_uom': w.uom_id.id,
'product_uos': w.uos_id and w.uos_id.id or w.uom_id.id
}
return {'value': v}
return {}
def get_cancel_ids(self, cr, uid, ids, context=None):
return [proc.id for proc in self.browse(cr, uid, ids, context=context) if proc.state != 'done']
def cancel(self, cr, uid, ids, context=None):
#cancel only the procurements that aren't done already
to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
if to_cancel_ids:
return self.write(cr, uid, to_cancel_ids, {'state': 'cancel'}, context=context)
def reset_to_confirmed(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
def run(self, cr, uid, ids, autocommit=False, context=None):
for procurement_id in ids:
            #we intentionally do the browse under the for loop to avoid caching all ids, which would be
            #resource-greedy and useless as we'll make a refresh later that will invalidate all the cache
            #(and thus the next iteration will fetch all the ids again)
procurement = self.browse(cr, uid, procurement_id, context=context)
if procurement.state not in ("running", "done"):
try:
if self._assign(cr, uid, procurement, context=context):
procurement.refresh()
res = self._run(cr, uid, procurement, context=context or {})
if res:
self.write(cr, uid, [procurement.id], {'state': 'running'}, context=context)
else:
self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context)
else:
self.message_post(cr, uid, [procurement.id], body=_('No rule matching this procurement'), context=context)
self.write(cr, uid, [procurement.id], {'state': 'exception'}, context=context)
if autocommit:
cr.commit()
except OperationalError:
if autocommit:
cr.rollback()
continue
else:
raise
return True
def check(self, cr, uid, ids, autocommit=False, context=None):
done_ids = []
for procurement in self.browse(cr, uid, ids, context=context):
try:
result = self._check(cr, uid, procurement, context=context)
if result:
done_ids.append(procurement.id)
if autocommit:
cr.commit()
except OperationalError:
if autocommit:
cr.rollback()
continue
else:
raise
if done_ids:
self.write(cr, uid, done_ids, {'state': 'done'}, context=context)
return done_ids
#
# Method to overwrite in different procurement modules
#
def _find_suitable_rule(self, cr, uid, procurement, context=None):
        '''This method returns a procurement.rule that describes what to do with the given procurement
        in order to complete its needs. It returns False if no suitable rule is found.
:param procurement: browse record
:rtype: int or False
'''
return False
def _assign(self, cr, uid, procurement, context=None):
        '''This method checks what to do with the given procurement in order to complete its needs.
It returns False if no solution is found, otherwise it stores the matching rule (if any) and
returns True.
:param procurement: browse record
:rtype: boolean
'''
#if the procurement already has a rule assigned, we keep it (it has a higher priority as it may have been chosen manually)
if procurement.rule_id:
return True
elif procurement.product_id.type != 'service':
rule_id = self._find_suitable_rule(cr, uid, procurement, context=context)
if rule_id:
self.write(cr, uid, [procurement.id], {'rule_id': rule_id}, context=context)
return True
return False
def _run(self, cr, uid, procurement, context=None):
'''This method implements the resolution of the given procurement
:param procurement: browse record
:returns: True if the resolution of the procurement was a success, False otherwise to set it in exception
'''
return True
def _check(self, cr, uid, procurement, context=None):
'''Returns True if the given procurement is fulfilled, False otherwise
:param procurement: browse record
:rtype: boolean
'''
return False
#
# Scheduler
#
def run_scheduler(self, cr, uid, use_new_cursor=False, company_id = False, context=None):
'''
        Call the scheduler to check the procurement orders. This is intended to be done for all existing companies at
the same time, so we're running all the methods as SUPERUSER to avoid intercompany and access rights issues.
@param self: The object pointer
@param cr: The current row, from the database cursor,
@param uid: The current user ID for security checks
@param ids: List of selected IDs
@param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
This is appropriate for batch jobs only.
@param context: A standard dictionary for contextual values
@return: Dictionary of values
'''
if context is None:
context = {}
try:
if use_new_cursor:
cr = openerp.registry(cr.dbname).cursor()
# Run confirmed procurements
dom = [('state', '=', 'confirmed')]
if company_id:
dom += [('company_id', '=', company_id)]
prev_ids = []
while True:
ids = self.search(cr, SUPERUSER_ID, dom, context=context)
if not ids or prev_ids == ids:
break
else:
prev_ids = ids
self.run(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context)
if use_new_cursor:
cr.commit()
# Check if running procurements are done
offset = 0
dom = [('state', '=', 'running')]
if company_id:
dom += [('company_id', '=', company_id)]
prev_ids = []
while True:
ids = self.search(cr, SUPERUSER_ID, dom, offset=offset, context=context)
if not ids or prev_ids == ids:
break
else:
prev_ids = ids
self.check(cr, SUPERUSER_ID, ids, autocommit=use_new_cursor, context=context)
if use_new_cursor:
cr.commit()
finally:
if use_new_cursor:
try:
cr.close()
except Exception:
pass
return {}
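    # Typical invocation sketch (e.g. from an ir.cron job; the arguments shown
    # are illustrative):
    #   self.pool.get('procurement.order').run_scheduler(
    #       cr, uid, use_new_cursor=True, context=context)
    # use_new_cursor=True makes the scheduler commit after each batch, which
    # suits long-running batch jobs.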
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,433,869,022,334,233,000 | 44.600575 | 254 | 0.591531 | false |
tnemis/staging-server | baseapp/models.py | 2 | 10846 | from django.db import models
# from simple_history.models import HistoricalRecords #to store history
from django.db.models.fields import *
from django.core.mail import send_mail
from django.template.loader import get_template
from django.template import Context
from django.conf import settings
import caching.base # cache data
#to validate min and max value
from django.core.validators import MaxValueValidator, MinValueValidator
from smart_selects.db_fields import ChainedForeignKey
"""
Model for Aeeo
"""
class Aeeo(models.Model):
    block_code = models.PositiveIntegerField()
    username = models.CharField(max_length=11)
    emis_blocks = models.CharField(max_length=22)
    emis_district = models.CharField(max_length=15)
    block_id = models.ForeignKey('Block')
    district_id = models.ForeignKey('District')
def __unicode__(self):
return u'%s' % (self.username)
"""
Model for Teacher App Staff_Category
"""
class Staff_Category(models.Model):
staff_category_code = models.CharField(max_length=100)
staff_category_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s' % (self. staff_category_name)
"""
Model for Designation
"""
class Designation(models.Model):
designation_code = models.CharField(max_length=10)
designation_name = models.CharField(max_length=1000)
stafs = models.ForeignKey('Staff_Category')
def __unicode__(self):
return u'%s%s' % (self.designation_name,self.stafs)
class Subject(models.Model):
subject_code = models.CharField(max_length=10)
subject_name = models.CharField(max_length=1000)
designation = models.ForeignKey('Designation')
def __unicode__(self):
return u'%s' % (self.subject_name)
"""
Model for Assembly constituencies
"""
class Assembly(models.Model):
assembly_name = models.CharField(max_length=100)
district = models.ForeignKey('District')
def __unicode__(self):
return u'%s' % (self.assembly_name)
"""
Model for Parliamentary constituencies
"""
class Parliamentary(models.Model):
parliamentary_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s' % (self.parliamentary_name)
"""
Model for State
"""
class State(caching.base.CachingMixin, models.Model):
state_name = models.CharField(max_length=100)
objects = caching.base.CachingManager()
def __unicode__(self):
return u'%s' % (self.state_name)
"""
Model for District
"""
class District(caching.base.CachingMixin, models.Model):
district_code = models.PositiveIntegerField(
unique=True, validators=[MinValueValidator(3300), MaxValueValidator(3399)])
district_name = models.CharField(max_length=100)
objects = caching.base.CachingManager()
def __unicode__(self):
return u'%s' % (self.district_name)
"""
Model for Block
"""
class Block(caching.base.CachingMixin, models.Model):
block_code = models.PositiveIntegerField(
unique=True, validators=[MinValueValidator(330000), MaxValueValidator(339999)])
block_name = models.CharField(max_length=100)
block_type = models.CharField(max_length=50)
district = models.ForeignKey('District')
objects = caching.base.CachingManager()
def __unicode__(self):
return u'%s %s %s' % (self.block_code, self.block_name, self.block_type)
"""
Model for Zone Type
"""
class Zone_type(models.Model):
zone_type = models.CharField(max_length=25)
def __unicode__(self):
return u'%s' % (self.zone_type)
"""
Model for zone
"""
class Zone(models.Model):
zone_type = models.ForeignKey(Zone_type)
code = models.PositiveIntegerField(unique=True, validators=[
MinValueValidator(330000000), MaxValueValidator(339999999)])
name = models.CharField(max_length=100)
block = models.ForeignKey('Block')
def __unicode__(self):
return u'%s %s' % (self.name, self.zone_type)
"""
Model for Habitation
"""
class Habitation(caching.base.CachingMixin, models.Model):
code = models.PositiveIntegerField(unique=True)
name = models.CharField(max_length=100)
#ward_number = models.CharField(max_length=100)
block = models.ForeignKey('Block')
zone = ChainedForeignKey(
Zone, chained_field='block', chained_model_field='block', auto_choose=True)
objects = caching.base.CachingManager()
def __unicode__(self):
return u'%s' % (self.name)
"""
Model for School
"""
class School(models.Model):
school_code = BigIntegerField()
school_name = models.CharField(max_length=100)
district = models.ForeignKey('District')
block = ChainedForeignKey(
Block, chained_field='district', chained_model_field='district', auto_choose=True)
#block = models.ForeignKey('Block')
habitation = ChainedForeignKey(
Habitation, chained_field='block', chained_model_field='block', auto_choose=True)
management = models.ForeignKey('Management')
category = models.ForeignKey('Category')
student_id_count = models.PositiveIntegerField()
def __unicode__(self):
return u'%s %s %s %s %s %s %s %s' % (self.school_code, self.school_name, self.habitation.name, self.district.district_name, self.block.block_name, self.management.management_name, self.category.category_name, self.student_id_count)
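# Sketch of how the chained selects above behave (illustrative): once a
# District is chosen on a School form, smart_selects narrows the Block
# choices to Block.objects.filter(district=<chosen district>), and the
# Habitation choices are in turn filtered by the chosen Block.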
"""
Model for Taluk
"""
class Taluk(caching.base.CachingMixin, models.Model):
taluk_name = models.CharField(max_length=100)
district = models.ForeignKey('District')
objects = caching.base.CachingManager()
def __unicode__(self):
return u'%s' % (self.taluk_name)
"""
Model for Educational District :
"""
class Educational_district(models.Model):
educational_district = models.CharField(max_length=100)
def __unicode__(self):
return u'%s' % (self.educational_district)
"""
Model for Educational Block/ Mandal/ Taluk Name
"""
class Educational_block(models.Model):
educational_block = models.CharField(max_length=100)
district = models.ForeignKey('District')
def __unicode__(self):
return u'%s' % (self.educational_block)
"""
Model for Revenue Block/ Mandal / Taluk name :
"""
class Revenue_block(models.Model):
revenue_block = models.CharField(max_length=100)
district = models.ForeignKey('District')
def __unicode__(self):
return u'%s' % (self.revenue_block)
"""
Model for Community
"""
class Community(models.Model):
community_code = models.CharField(max_length=100)
community_name = models.CharField(max_length=100)
religion = models.ForeignKey('Religion')
def __unicode__(self):
return u'%s' % (self.community_name)
"""
Model for Sub Castes
"""
class Sub_Castes(models.Model):
caste_code = models.CharField(max_length=10)
caste_name = models.CharField(max_length=1000)
community = models.ForeignKey('Community')
def __unicode__(self):
return u'%s %s %s' % (self.caste_name,self.caste_code, self.community.community_name)
"""
Model for Religion
"""
class Religion(models.Model):
religion_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s' % (self.religion_name)
"""
Model for Language
"""
class Language(models.Model):
language_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s' % (self.language_name)
"""
Model for Differently Abled
"""
class Differently_abled(models.Model):
da_code = models.CharField(max_length=100)
da_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s %s' % (self.da_code, self.da_name)
"""
Model for Disadvantaged Group
"""
class Disadvantaged_group(models.Model):
dis_group_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s' % (self.dis_group_name)
"""
Model for Government Schemes
"""
class Schemes(models.Model):
scheme_code = models.CharField(max_length=100)
scheme_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s %s' % (self.scheme_code, self.scheme_name)
"""
Model for School Management
"""
class Management(models.Model):
management_code = models.CharField(max_length=100)
management_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s %s' % (self.management_code, self.management_name)
"""
Model for School Category
"""
class Category(models.Model):
category_code = models.CharField(max_length=100)
category_name = models.CharField(max_length=100)
def __unicode__(self):
return u'%s %s' % (self.category_code, self.category_name)
"""
Model for Nationality
"""
class Nationality(models.Model):
nationality = models.CharField(max_length=50)
def __unicode__(self):
return u'%s' % (self.nationality)
"""
Model for class studying
"""
class Class_Studying(models.Model):
class_studying = models.CharField(max_length=10)
def __unicode__(self):
return u'%s' % (self.class_studying)
"""
Model for group code for hsc
"""
class Group_code(models.Model):
group_code = models.PositiveIntegerField()
group_name = models.CharField(max_length=100)
group_description = models.TextField(max_length=500)
def __unicode__(self):
return u'%s %s %s' % (self.group_code, self.group_name, self.group_description)
"""
Model for Education Medium
"""
class Education_medium(models.Model):
education_medium = models.CharField(max_length=15)
def __unicode__(self):
return u'%s' % (self.education_medium)
"""
Model for bank
"""
class Bank(models.Model):
bank = models.CharField(max_length=30)
def __unicode__(self):
return u'%s' % (self.bank)
"""
Model for academic year
"""
class Academic_Year(models.Model):
academic_year = models.CharField(max_length=9)
def __unicode__(self):
return u'%s' % (self.academic_year)
""" Model for pool database """
class Child_detail_pool_database(models.Model):
district = models.ForeignKey('District')
block = ChainedForeignKey(
Block, chained_field='district', chained_model_field='district', auto_choose=True)
school = ChainedForeignKey(
School, chained_field='block', chained_model_field='block', auto_choose=True)
class_last_studied = models.CharField(max_length=20,blank=True,null=True)
unique_id_no = models.BigIntegerField(blank=True, null=True)
class_studying = models.ForeignKey(Class_Studying)
migrated_school = models.CharField(max_length=100,blank=True,null=True)
def __unicode__(self):
return u'%s %s %s %s %s %s %s' % (self.district.district_name, self.block.block_name, self.school.school_name, self.class_last_studied, self.unique_id_no, self.class_studying, self.migrated_school)
| mit | 8,680,728,875,108,680,000 | 23.70615 | 239 | 0.679329 | false |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/django/conf/global_settings.py | 89 | 22130 | # Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Hosts/domain names that are valid for this site.
# "*" matches anything, ".example.com" matches example.com and all subdomains
ALLOWED_HOSTS = []
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box.
LANGUAGES = (
('af', gettext_noop('Afrikaans')),
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('be', gettext_noop('Belarusian')),
('bn', gettext_noop('Bengali')),
('br', gettext_noop('Breton')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('es-ve', gettext_noop('Venezuelan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('ia', gettext_noop('Interlingua')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lb', gettext_noop('Luxembourgish')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('my', gettext_noop('Burmese')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('os', gettext_noop('Ossetic')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('udm', gettext_noop('Udmurt')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa", "ur")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# Email address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails. Deprecated, must be removed in 1.8.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info. If left empty, will default to the dummy backend.
DATABASES = {}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported by BrokenLinkEmailsMiddleware. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
#    re.compile(r'^/favicon.ico$'),
#    re.compile(r'^/robots.txt$'),
#    re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = None
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which streamed file uploads will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
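# Example (octal notation; an illustrative value, not a recommendation):
# FILE_UPLOAD_PERMISSIONS = 0644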
# Python module path where the user will place custom format definitions.
# The directory this setting points to should contain subdirectories named
# after the locales, each containing a formats.py file
# (i.e. "myproject.locale" to use myproject/locale/en/formats.py etc.)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format strings here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
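# Example (the header name below is an assumption; it must match what your
# proxy actually sets):
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')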
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_CACHE_ALIAS = 'default' # Cache to store session data if using the cache session backend.
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".example.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' # class to serialize session data
#########
# CACHE #
#########
# The cache backends to use.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'auth.User'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# The first hasher in this list is the preferred algorithm. Any
# password using a different algorithm will be converted automatically
# upon login.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedSHA1PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# Custom logging configuration.
LOGGING = {}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
| bsd-3-clause | 1,896,713,530,117,979,000 | 35.578512 | 174 | 0.683281 | false |
dparks1134/Art | SVG/Examples/svg_write/examples/basic_shapes.py | 2 | 1608 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman
# Purpose: svg examples
# Created: 07.11.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
try:
import svgwrite
except ImportError:
# if svgwrite is not 'installed' append parent dir of __file__ to sys.path
import sys, os
sys.path.insert(0, os.path.abspath(os.path.split(os.path.abspath(__file__))[0]+'/..'))
import svgwrite
from svgwrite import cm, mm
def basic_shapes(name):
dwg = svgwrite.Drawing(filename=name, debug=True)
hlines = dwg.add(dwg.g(id='hlines', stroke='green'))
for y in range(20):
hlines.add(dwg.line(start=(2*cm, (2+y)*cm), end=(18*cm, (2+y)*cm)))
vlines = dwg.add(dwg.g(id='vline', stroke='blue'))
for x in range(17):
vlines.add(dwg.line(start=((2+x)*cm, 2*cm), end=((2+x)*cm, 21*cm)))
shapes = dwg.add(dwg.g(id='shapes', fill='red'))
# set presentation attributes at object creation as SVG-Attributes
shapes.add(dwg.circle(center=(15*cm, 8*cm), r='2.5cm', stroke='blue',
stroke_width=3))
# override the 'fill' attribute of the parent group 'shapes'
shapes.add(dwg.rect(insert=(5*cm, 5*cm), size=(45*mm, 45*mm),
fill='blue', stroke='red', stroke_width=3))
# or set presentation attributes by helper functions of the Presentation-Mixin
ellipse = shapes.add(dwg.ellipse(center=(10*cm, 15*cm), r=('5cm', '10mm')))
ellipse.fill('green', opacity=0.5).stroke('black', width=5).dasharray([20, 20])
dwg.save()
if __name__ == '__main__':
basic_shapes('basic_shapes.svg')
| gpl-3.0 | 4,035,704,171,224,204,000 | 36.395349 | 90 | 0.625 | false |
jemandez/creaturas-magicas | Configuraciones básicas/scripts/addons/blendertools-1.0.0/makewalk/floor.py | 1 | 11011 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Project Name: MakeHuman
# Product Home Page: http://www.makehuman.org/
# Code Home Page: http://code.google.com/p/makehuman/
# Authors: Thomas Larsson
# Script copyright (C) MakeHuman Team 2001-2014
# Coding Standards: See http://www.makehuman.org/node/165
import bpy
from bpy.props import BoolProperty
from mathutils import Matrix, Vector
from .utils import *
from . import fkik
#-------------------------------------------------------------
# Plane
#-------------------------------------------------------------
def getRigAndPlane(scn):
rig = None
plane = None
for ob in scn.objects:
if ob.select:
if ob.type == 'ARMATURE':
if rig:
raise MocapError("Two armatures selected: %s and %s" % (rig.name, ob.name))
else:
rig = ob
elif ob.type == 'MESH':
if plane:
raise MocapError("Two meshes selected: %s and %s" % (plane.name, ob.name))
else:
plane = ob
if rig is None:
raise MocapError("No rig selected")
return rig,plane
def getPlaneInfo(plane):
if plane is None:
ez = Vector((0,0,1))
origin = Vector((0,0,0))
rot = Matrix()
else:
mat = plane.matrix_world.to_3x3().normalized()
ez = mat.col[2]
origin = plane.location
rot = mat.to_4x4()
return ez,origin,rot
#-------------------------------------------------------------
# Offset and projection
#-------------------------------------------------------------
def getProjection(vec, ez):
return ez.dot(Vector(vec[:3]))
def getOffset(point, ez, origin):
vec = Vector(point[:3]) - origin
offset = -ez.dot(vec)
return offset
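def _offset_sanity_check():
    # Illustrative sketch only (assumed plane: normal +Z through the world
    # origin, i.e. Blender's default ground plane); not part of the add-on's
    # public behaviour. A point 0.3 units below the plane needs a +0.3 lift
    # along the plane normal.
    ez = Vector((0, 0, 1))
    origin = Vector((0, 0, 0))
    assert abs(getOffset(Vector((1.0, 2.0, -0.3)), ez, origin) - 0.3) < 1e-6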
def getHeadOffset(pb, ez, origin):
head = pb.matrix.col[3]
return getOffset(head, ez, origin)
def getTailOffset(pb, ez, origin):
head = pb.matrix.col[3]
y = pb.matrix.col[1]
tail = head + y*pb.length
return getOffset(tail, ez, origin)
def addOffset(pb, offset, ez):
gmat = pb.matrix.copy()
x,y,z = offset*ez
gmat.col[3] += Vector((x,y,z,0))
pmat = fkik.getPoseMatrix(gmat, pb)
fkik.insertLocation(pb, pmat)
#-------------------------------------------------------------
# Toe below ball
#-------------------------------------------------------------
def toesBelowBall(context):
scn = context.scene
rig,plane = getRigAndPlane(scn)
try:
useIk = rig["MhaLegIk_L"] or rig["MhaLegIk_R"]
except KeyError:
useIk = False
if useIk:
raise MocapError("Toe Below Ball only for FK feet")
layers = list(rig.data.layers)
startProgress("Keep toes down")
frames = getActiveFramesBetweenMarkers(rig, scn)
print("Left toe")
toeBelowBall(scn, frames, rig, plane, ".L")
print("Right toe")
toeBelowBall(scn, frames, rig, plane, ".R")
endProgress("Toes kept down")
rig.data.layers = layers
def toeBelowBall(scn, frames, rig, plane, suffix):
from .retarget import getLocks
foot,toe,mBall,mToe,mHeel = getFkFeetBones(rig, suffix)
ez,origin,rot = getPlaneInfo(plane)
order,lock = getLocks(toe, scn)
factor = 1.0/toe.length
nFrames = len(frames)
if mBall:
for n,frame in enumerate(frames):
scn.frame_set(frame)
showProgress(n, frame, nFrames)
zToe = getProjection(mToe.matrix.col[3], ez)
zBall = getProjection(mBall.matrix.col[3], ez)
if zToe > zBall:
pmat = offsetToeRotation(toe, ez, factor, order, lock, scn)
else:
pmat = fkik.getPoseMatrix(toe.matrix, toe)
pmat = keepToeRotationNegative(pmat, scn)
fkik.insertRotation(toe, pmat)
else:
for n,frame in enumerate(frames):
scn.frame_set(frame)
showProgress(n, frame, nFrames)
dzToe = getProjection(toe.matrix.col[1], ez)
if dzToe > 0:
pmat = offsetToeRotation(toe, ez, factor, order, lock, scn)
else:
pmat = fkik.getPoseMatrix(toe.matrix, toe)
pmat = keepToeRotationNegative(pmat, scn)
fkik.insertRotation(toe, pmat)
def offsetToeRotation(toe, ez, factor, order, lock, scn):
from .retarget import correctMatrixForLocks
mat = toe.matrix.to_3x3()
y = mat.col[1]
y -= ez.dot(y)*ez
y.normalize()
x = mat.col[0]
x -= x.dot(y)*y
x.normalize()
z = x.cross(y)
mat.col[0] = x
mat.col[1] = y
mat.col[2] = z
gmat = mat.to_4x4()
gmat.col[3] = toe.matrix.col[3]
pmat = fkik.getPoseMatrix(gmat, toe)
return correctMatrixForLocks(pmat, order, lock, toe, scn.McpUseLimits)
def keepToeRotationNegative(pmat, scn):
euler = pmat.to_3x3().to_euler('YZX')
if euler.x > 0:
pmat0 = pmat
euler.x = 0
pmat = euler.to_matrix().to_4x4()
pmat.col[3] = pmat0.col[3]
return pmat
class VIEW3D_OT_McpOffsetToeButton(bpy.types.Operator):
bl_idname = "mcp.offset_toe"
bl_label = "Offset Toes"
bl_description = "Keep toes below balls"
bl_options = {'UNDO'}
def execute(self, context):
from .target import getTargetArmature
getTargetArmature(context.object, context.scene)
try:
toesBelowBall(context)
except MocapError:
bpy.ops.mcp.error('INVOKE_DEFAULT')
        return {'FINISHED'}
#-------------------------------------------------------------
# Floor
#-------------------------------------------------------------
def floorFoot(context):
startProgress("Keep feet above floor")
scn = context.scene
rig,plane = getRigAndPlane(scn)
try:
useIk = rig["MhaLegIk_L"] or rig["MhaLegIk_R"]
except KeyError:
useIk = False
frames = getActiveFramesBetweenMarkers(rig, scn)
if useIk:
floorIkFoot(rig, plane, scn, frames)
else:
floorFkFoot(rig, plane, scn, frames)
endProgress("Feet kept above floor")
def getFkFeetBones(rig, suffix):
foot = getTrgBone("foot" + suffix, rig)
toe = getTrgBone("toe" + suffix, rig)
try:
mBall = rig.pose.bones["ball.marker" + suffix]
mToe = rig.pose.bones["toe.marker" + suffix]
mHeel = rig.pose.bones["heel.marker" + suffix]
except KeyError:
mBall = mToe = mHeel = None
return foot,toe,mBall,mToe,mHeel
def floorFkFoot(rig, plane, scn, frames):
hips = getTrgBone("hips", rig)
lFoot,lToe,lmBall,lmToe,lmHeel = getFkFeetBones(rig, ".L")
rFoot,rToe,rmBall,rmToe,rmHeel = getFkFeetBones(rig, ".R")
ez,origin,rot = getPlaneInfo(plane)
nFrames = len(frames)
for n,frame in enumerate(frames):
scn.frame_set(frame)
fkik.updateScene()
offset = 0
if scn.McpFloorLeft:
offset = getFkOffset(rig, ez, origin, lFoot, lToe, lmBall, lmToe, lmHeel)
if scn.McpFloorRight:
rOffset = getFkOffset(rig, ez, origin, rFoot, rToe, rmBall, rmToe, rmHeel)
if rOffset > offset:
offset = rOffset
showProgress(n, frame, nFrames)
if offset > 0:
addOffset(hips, offset, ez)
def getFkOffset(rig, ez, origin, foot, toe, mBall, mToe, mHeel):
if mBall:
offset = toeOffset = getHeadOffset(mToe, ez, origin)
ballOffset = getHeadOffset(mBall, ez, origin)
if ballOffset > offset:
offset = ballOffset
heelOffset = getHeadOffset(mHeel, ez, origin)
if heelOffset > offset:
offset = heelOffset
elif toe:
offset = getTailOffset(toe, ez, origin)
ballOffset = getHeadOffset(toe, ez, origin)
if ballOffset > offset:
offset = ballOffset
ball = toe.matrix.col[3]
y = toe.matrix.col[1]
heel = ball - y*foot.length
heelOffset = getOffset(heel, ez, origin)
if heelOffset > offset:
offset = heelOffset
else:
offset = 0
return offset
def floorIkFoot(rig, plane, scn, frames):
root = rig.pose.bones["root"]
lleg = rig.pose.bones["foot.ik.L"]
rleg = rig.pose.bones["foot.ik.R"]
ez,origin,rot = getPlaneInfo(plane)
fillKeyFrames(lleg, rig, frames, 3, mode='location')
fillKeyFrames(rleg, rig, frames, 3, mode='location')
if scn.McpFloorHips:
fillKeyFrames(root, rig, frames, 3, mode='location')
nFrames = len(frames)
for n,frame in enumerate(frames):
scn.frame_set(frame)
showProgress(n, frame, nFrames)
if scn.McpFloorLeft:
lOffset = getIkOffset(rig, ez, origin, lleg)
if lOffset > 0:
addOffset(lleg, lOffset, ez)
else:
lOffset = 0
if scn.McpFloorRight:
rOffset = getIkOffset(rig, ez, origin, rleg)
if rOffset > 0:
addOffset(rleg, rOffset, ez)
else:
rOffset = 0
hOffset = min(lOffset,rOffset)
if hOffset > 0 and scn.McpFloorHips:
addOffset(root, hOffset, ez)
def getIkOffset(rig, ez, origin, leg):
offset = getHeadOffset(leg, ez, origin)
tailOffset = getTailOffset(leg, ez, origin)
if tailOffset > offset:
offset = tailOffset
return offset
    # NOTE: the block below was unreachable dead code in the original (it
    # sits after the return above and references an undefined 'suffix');
    # kept commented out for reference only.
    # foot = rig.pose.bones["foot.rev" + suffix]
    # toe = rig.pose.bones["toe.rev" + suffix]
    # ballOffset = getTailOffset(toe, ez, origin)
    # if ballOffset > offset:
    #     offset = ballOffset
    # ball = foot.matrix.col[3]
    # y = toe.matrix.col[1]
    # heel = ball + y*foot.length
    # heelOffset = getOffset(heel, ez, origin)
    # if heelOffset > offset:
    #     offset = heelOffset
    # return offset
class VIEW3D_OT_McpFloorFootButton(bpy.types.Operator):
bl_idname = "mcp.floor_foot"
bl_label = "Keep Feet Above Floor"
bl_description = "Keep Feet Above Plane"
bl_options = {'UNDO'}
def execute(self, context):
from .target import getTargetArmature
getTargetArmature(context.object, context.scene)
try:
floorFoot(context)
except MocapError:
bpy.ops.mcp.error('INVOKE_DEFAULT')
        return {'FINISHED'}
| gpl-3.0 | -5,801,952,940,549,268,000 | 29.671309 | 95 | 0.580692 | false |
Oslandia/vizitown_plugin | twisted/conch/ssh/session.py | 59 | 10817 | # -*- test-case-name: twisted.conch.test.test_session -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module contains the implementation of SSHSession, which (by default)
allows access to a shell and a python interpreter over SSH.
Maintainer: Paul Swartz
"""
import struct
import signal
import sys
import os
from zope.interface import implements
from twisted.internet import interfaces, protocol
from twisted.python import log
from twisted.conch.interfaces import ISession
from twisted.conch.ssh import common, channel
class SSHSession(channel.SSHChannel):
name = 'session'
def __init__(self, *args, **kw):
channel.SSHChannel.__init__(self, *args, **kw)
self.buf = ''
self.client = None
self.session = None
def request_subsystem(self, data):
        subsystem, ignored = common.getNS(data)
log.msg('asking for subsystem "%s"' % subsystem)
client = self.avatar.lookupSubsystem(subsystem, data)
if client:
pp = SSHSessionProcessProtocol(self)
proto = wrapProcessProtocol(pp)
client.makeConnection(proto)
pp.makeConnection(wrapProtocol(client))
self.client = pp
return 1
else:
log.msg('failed to get subsystem')
return 0
def request_shell(self, data):
log.msg('getting shell')
if not self.session:
self.session = ISession(self.avatar)
try:
pp = SSHSessionProcessProtocol(self)
self.session.openShell(pp)
except:
log.deferr()
return 0
else:
self.client = pp
return 1
def request_exec(self, data):
if not self.session:
self.session = ISession(self.avatar)
f,data = common.getNS(data)
log.msg('executing command "%s"' % f)
try:
pp = SSHSessionProcessProtocol(self)
self.session.execCommand(pp, f)
except:
log.deferr()
return 0
else:
self.client = pp
return 1
def request_pty_req(self, data):
if not self.session:
self.session = ISession(self.avatar)
term, windowSize, modes = parseRequest_pty_req(data)
log.msg('pty request: %s %s' % (term, windowSize))
try:
self.session.getPty(term, windowSize, modes)
except:
log.err()
return 0
else:
return 1
def request_window_change(self, data):
if not self.session:
self.session = ISession(self.avatar)
winSize = parseRequest_window_change(data)
try:
self.session.windowChanged(winSize)
except:
log.msg('error changing window size')
log.err()
return 0
else:
return 1
def dataReceived(self, data):
if not self.client:
#self.conn.sendClose(self)
self.buf += data
return
self.client.transport.write(data)
def extReceived(self, dataType, data):
if dataType == connection.EXTENDED_DATA_STDERR:
if self.client and hasattr(self.client.transport, 'writeErr'):
self.client.transport.writeErr(data)
else:
            log.msg('weird extended data: %s' % dataType)
def eofReceived(self):
if self.session:
self.session.eofReceived()
elif self.client:
self.conn.sendClose(self)
def closed(self):
if self.session:
self.session.closed()
elif self.client:
self.client.transport.loseConnection()
#def closeReceived(self):
# self.loseConnection() # don't know what to do with this
def loseConnection(self):
if self.client:
self.client.transport.loseConnection()
channel.SSHChannel.loseConnection(self)
class _ProtocolWrapper(protocol.ProcessProtocol):
"""
This class wraps a L{Protocol} instance in a L{ProcessProtocol} instance.
"""
def __init__(self, proto):
self.proto = proto
def connectionMade(self): self.proto.connectionMade()
def outReceived(self, data): self.proto.dataReceived(data)
def processEnded(self, reason): self.proto.connectionLost(reason)
class _DummyTransport:
def __init__(self, proto):
self.proto = proto
def dataReceived(self, data):
self.proto.transport.write(data)
def write(self, data):
self.proto.dataReceived(data)
def writeSequence(self, seq):
self.write(''.join(seq))
def loseConnection(self):
self.proto.connectionLost(protocol.connectionDone)
def wrapProcessProtocol(inst):
if isinstance(inst, protocol.Protocol):
return _ProtocolWrapper(inst)
else:
return inst
def wrapProtocol(proto):
return _DummyTransport(proto)
# SUPPORTED_SIGNALS is a list of signals that every session channel is supposed
# to accept. See RFC 4254
SUPPORTED_SIGNALS = ["ABRT", "ALRM", "FPE", "HUP", "ILL", "INT", "KILL",
"PIPE", "QUIT", "SEGV", "TERM", "USR1", "USR2"]
class SSHSessionProcessProtocol(protocol.ProcessProtocol):
"""I am both an L{IProcessProtocol} and an L{ITransport}.
I am a transport to the remote endpoint and a process protocol to the
local subsystem.
"""
implements(interfaces.ITransport)
# once initialized, a dictionary mapping signal values to strings
# that follow RFC 4254.
_signalValuesToNames = None
def __init__(self, session):
self.session = session
self.lostOutOrErrFlag = False
def connectionMade(self):
if self.session.buf:
self.transport.write(self.session.buf)
self.session.buf = None
def outReceived(self, data):
self.session.write(data)
def errReceived(self, err):
self.session.writeExtended(connection.EXTENDED_DATA_STDERR, err)
def outConnectionLost(self):
"""
EOF should only be sent when both STDOUT and STDERR have been closed.
"""
if self.lostOutOrErrFlag:
self.session.conn.sendEOF(self.session)
else:
self.lostOutOrErrFlag = True
def errConnectionLost(self):
"""
See outConnectionLost().
"""
self.outConnectionLost()
    def connectionLost(self, reason=None):
self.session.loseConnection()
def _getSignalName(self, signum):
"""
Get a signal name given a signal number.
"""
if self._signalValuesToNames is None:
self._signalValuesToNames = {}
# make sure that the POSIX ones are the defaults
for signame in SUPPORTED_SIGNALS:
signame = 'SIG' + signame
sigvalue = getattr(signal, signame, None)
if sigvalue is not None:
self._signalValuesToNames[sigvalue] = signame
for k, v in signal.__dict__.items():
# Check for platform specific signals, ignoring Python specific
# SIG_DFL and SIG_IGN
if k.startswith('SIG') and not k.startswith('SIG_'):
if v not in self._signalValuesToNames:
self._signalValuesToNames[v] = k + '@' + sys.platform
return self._signalValuesToNames[signum]
def processEnded(self, reason=None):
"""
When we are told the process ended, try to notify the other side about
how the process ended using the exit-signal or exit-status requests.
Also, close the channel.
"""
if reason is not None:
err = reason.value
if err.signal is not None:
signame = self._getSignalName(err.signal)
if (getattr(os, 'WCOREDUMP', None) is not None and
os.WCOREDUMP(err.status)):
log.msg('exitSignal: %s (core dumped)' % (signame,))
coreDumped = 1
else:
log.msg('exitSignal: %s' % (signame,))
coreDumped = 0
self.session.conn.sendRequest(self.session, 'exit-signal',
common.NS(signame[3:]) + chr(coreDumped) +
common.NS('') + common.NS(''))
elif err.exitCode is not None:
log.msg('exitCode: %r' % (err.exitCode,))
self.session.conn.sendRequest(self.session, 'exit-status',
struct.pack('>L', err.exitCode))
self.session.loseConnection()
def getHost(self):
"""
Return the host from my session's transport.
"""
return self.session.conn.transport.getHost()
def getPeer(self):
"""
Return the peer from my session's transport.
"""
return self.session.conn.transport.getPeer()
def write(self, data):
self.session.write(data)
def writeSequence(self, seq):
self.session.write(''.join(seq))
def loseConnection(self):
self.session.loseConnection()
class SSHSessionClient(protocol.Protocol):
def dataReceived(self, data):
if self.transport:
self.transport.write(data)
# methods factored out to make live easier on server writers
def parseRequest_pty_req(data):
"""Parse the data from a pty-req request into usable data.
@returns: a tuple of (terminal type, (rows, cols, xpixel, ypixel), modes)
"""
term, rest = common.getNS(data)
cols, rows, xpixel, ypixel = struct.unpack('>4L', rest[: 16])
    modes, ignored = common.getNS(rest[16:])
winSize = (rows, cols, xpixel, ypixel)
modes = [(ord(modes[i]), struct.unpack('>L', modes[i+1: i+5])[0]) for i in range(0, len(modes)-1, 5)]
return term, winSize, modes
def packRequest_pty_req(term, (rows, cols, xpixel, ypixel), modes):
"""Pack a pty-req request so that it is suitable for sending.
NOTE: modes must be packed before being sent here.
"""
termPacked = common.NS(term)
winSizePacked = struct.pack('>4L', cols, rows, xpixel, ypixel)
modesPacked = common.NS(modes) # depend on the client packing modes
return termPacked + winSizePacked + modesPacked
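# Round-trip sketch for the two helpers above; not part of the Twisted API.
# 'xterm' and the 80x24 geometry are assumed example values, and '' stands
# in for an already-packed (empty) modes string.
def _examplePtyReqRoundTrip():
    packed = packRequest_pty_req('xterm', (24, 80, 0, 0), '')
    term, winSize, modes = parseRequest_pty_req(packed)
    assert term == 'xterm' and winSize == (24, 80, 0, 0) and modes == []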
def parseRequest_window_change(data):
"""Parse the data from a window-change request into usuable data.
@returns: a tuple of (rows, cols, xpixel, ypixel)
"""
cols, rows, xpixel, ypixel = struct.unpack('>4L', data)
return rows, cols, xpixel, ypixel
def packRequest_window_change((rows, cols, xpixel, ypixel)):
"""Pack a window-change request so that it is suitable for sending.
"""
return struct.pack('>4L', cols, rows, xpixel, ypixel)
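def _exampleWindowChangeRoundTrip():
    # Illustrative sketch (assumed 24x80 geometry, zero pixel sizes); not
    # part of the Twisted API.
    packed = packRequest_window_change((24, 80, 0, 0))
    assert parseRequest_window_change(packed) == (24, 80, 0, 0)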
import connection
| gpl-2.0 | 1,108,512,902,680,832,400 | 30.083333 | 105 | 0.601923 | false |
apruden/opal | opal-python-client/src/main/python/opal/file.py | 1 | 3231 | """
Opal file management.
"""
import sys
import pycurl
import opal.core
class OpalFile:
"""
File on Opal file system
"""
def __init__(self, path):
self.path = path
def get_meta_ws(self):
return '/files/meta' + self.path
def get_ws(self):
return '/files' + self.path
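# Endpoint sketch (the path below is an illustrative assumption, not a real
# Opal file):
def _example_endpoints():
    f = OpalFile('/home/administrator/data.csv')
    # get_ws()      -> '/files/home/administrator/data.csv'
    # get_meta_ws() -> '/files/meta/home/administrator/data.csv'
    return f.get_ws(), f.get_meta_ws()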
def add_arguments(parser):
"""
Add file command specific options
"""
parser.add_argument('path', help='File path in Opal file system.')
parser.add_argument('--download', '-dl', action='store_true', help='Download file, or folder (as a zip file).')
parser.add_argument('--upload', '-up', required=False, help='Upload a local file to a folder in Opal file system.')
parser.add_argument('--delete', '-dt', action='store_true', help='Delete a file on Opal file system.')
parser.add_argument('--force', '-f', action='store_true', help='Skip confirmation.')
parser.add_argument('--json', '-j', action='store_true', help='Pretty JSON formatting of the response')
def do_command(args):
"""
Execute file command
"""
# Build and send request
try:
request = opal.core.OpalClient.build(opal.core.OpalClient.LoginInfo.parse(args)).new_request()
request.fail_on_error().accept_json()
if args.verbose:
request.verbose()
        # build the Opal file reference
file = OpalFile(args.path)
# send request
if args.download:
response = request.get().resource(file.get_ws()).send()
elif args.upload:
#boundary = 'OpalPythonClient'
#request.post().content_type('multipart/form-data; boundary=' + boundary).accept('text/html')
#content = '--' + boundary + '\n'
#content = content + 'Content-Disposition: form-data; name="fileToUpload"; filename="'+args.upload+'"\n\n'
#content = content + open(args.upload,'rb').read()
#content = content + '\n--' + boundary
#request.content(content)
request.content_upload(args.upload).accept('text/html').content_type('multipart/form-data')
response = request.post().resource(file.get_ws()).send()
elif args.delete:
# confirm
if args.force:
response = request.delete().resource(file.get_ws()).send()
else:
print 'Delete the file "' + args.path + '"? [y/N]: ',
confirmed = sys.stdin.readline().rstrip().strip()
if confirmed == 'y':
response = request.delete().resource(file.get_ws()).send()
else:
print 'Aborted.'
sys.exit(0)
else:
response = request.get().resource(file.get_meta_ws()).send()
# format response
res = response.content
if args.json and not args.download and not args.upload:
res = response.pretty_json()
# output to stdout
print res
    except pycurl.error, error:
        # pycurl.error subclasses Exception, so it must be caught first to
        # be reachable at all.
        errno, errstr = error
        print >> sys.stderr, 'An error occurred: ', errstr
        sys.exit(2)
    except Exception, e:
        print >> sys.stderr, e
        sys.exit(2) | gpl-3.0 | 2,970,816,658,193,952,300 | 33.382979 | 119 | 0.573816 | false |
timkpaine/lantern | tests/plot/test_plot.py | 1 | 1272 | from mock import patch
import matplotlib
matplotlib.use('Agg')
class TestConfig:
    def setup(self):
        # runs before each test method
        pass
    def teardown(self):
        # runs after each test method
        pass
    @classmethod
    def setup_class(cls):
        # runs once before any methods in this class
        pass
    @classmethod
    def teardown_class(cls):
        # runs once after all methods in this class
        pass
def test_all(self):
with patch('lantern.plotting.plot_matplotlib.in_ipynb', create=True) as mock1:
import lantern as l
mock1.return_value = True
df = l.bar()
l.plot(df, 'line', 'matplotlib')
def test_list(self):
with patch('lantern.plotting.plot_matplotlib.in_ipynb', create=True) as mock1:
import lantern as l
mock1.return_value = True
df = l.bar()
l.plot(df, ['line' for _ in df], 'matplotlib')
def test_dict(self):
with patch('lantern.plotting.plot_matplotlib.in_ipynb', create=True) as mock1:
import lantern as l
mock1.return_value = True
df = l.bar()
l.plot(df, {c: 'line' for c in df.columns}, 'matplotlib')
| apache-2.0 | 925,046,494,711,456,600 | 27.266667 | 86 | 0.578616 | false |
hroncok/horus | pkg/darwin/create-dmg/support/dmg-license.py | 47 | 6252 | #! /usr/bin/env python
"""
This script adds a license file to a DMG. Requires Xcode and a plain ascii text
license file.
Obviously only runs on a Mac.
Copyright (C) 2011-2013 Jared Hobbs
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import sys
import tempfile
import optparse
class Path(str):
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
os.unlink(self)
def mktemp(dir=None, suffix=''):
(fd, filename) = tempfile.mkstemp(dir=dir, suffix=suffix)
os.close(fd)
return Path(filename)
def main(options, args):
dmgFile, license = args
with mktemp('.') as tmpFile:
with open(tmpFile, 'w') as f:
f.write("""data 'TMPL' (128, "LPic") {
$"1344 6566 6175 6C74 204C 616E 6775 6167"
$"6520 4944 4457 5244 0543 6F75 6E74 4F43"
$"4E54 042A 2A2A 2A4C 5354 430B 7379 7320"
$"6C61 6E67 2049 4444 5752 441E 6C6F 6361"
$"6C20 7265 7320 4944 2028 6F66 6673 6574"
$"2066 726F 6D20 3530 3030 4457 5244 1032"
$"2D62 7974 6520 6C61 6E67 7561 6765 3F44"
$"5752 4404 2A2A 2A2A 4C53 5445"
};
data 'LPic' (5000) {
$"0000 0002 0000 0000 0000 0000 0004 0000"
};
data 'STR#' (5000, "English buttons") {
$"0006 0D45 6E67 6C69 7368 2074 6573 7431"
$"0541 6772 6565 0844 6973 6167 7265 6505"
$"5072 696E 7407 5361 7665 2E2E 2E7A 4966"
$"2079 6F75 2061 6772 6565 2077 6974 6820"
$"7468 6520 7465 726D 7320 6F66 2074 6869"
$"7320 6C69 6365 6E73 652C 2063 6C69 636B"
$"2022 4167 7265 6522 2074 6F20 6163 6365"
$"7373 2074 6865 2073 6F66 7477 6172 652E"
$"2020 4966 2079 6F75 2064 6F20 6E6F 7420"
$"6167 7265 652C 2070 7265 7373 2022 4469"
$"7361 6772 6565 2E22"
};
data 'STR#' (5002, "English") {
$"0006 0745 6E67 6C69 7368 0541 6772 6565"
$"0844 6973 6167 7265 6505 5072 696E 7407"
$"5361 7665 2E2E 2E7B 4966 2079 6F75 2061"
$"6772 6565 2077 6974 6820 7468 6520 7465"
$"726D 7320 6F66 2074 6869 7320 6C69 6365"
$"6E73 652C 2070 7265 7373 2022 4167 7265"
$"6522 2074 6F20 696E 7374 616C 6C20 7468"
$"6520 736F 6674 7761 7265 2E20 2049 6620"
$"796F 7520 646F 206E 6F74 2061 6772 6565"
$"2C20 7072 6573 7320 2244 6973 6167 7265"
$"6522 2E"
};\n\n""")
with open(license, 'r') as l:
kind = 'RTF ' if license.lower().endswith('.rtf') else 'TEXT'
f.write('data \'%s\' (5000, "English") {\n' % kind)
def escape(s):
return s.strip().replace('\\', '\\\\').replace('"', '\\"')
for line in l:
if len(line) < 1000:
f.write(' "' + escape(line) + '\\n"\n')
else:
for liner in line.split('.'):
f.write(' "' + escape(liner) + '. \\n"\n')
f.write('};\n\n')
f.write("""data 'styl' (5000, "English") {
$"0003 0000 0000 000C 0009 0014 0000 0000"
$"0000 0000 0000 0000 0027 000C 0009 0014"
$"0100 0000 0000 0000 0000 0000 002A 000C"
$"0009 0014 0000 0000 0000 0000 0000"
};\n""")
os.system('hdiutil unflatten -quiet "%s"' % dmgFile)
ret = os.system('%s -a %s -o "%s"' %
(options.rez, tmpFile, dmgFile))
os.system('hdiutil flatten -quiet "%s"' % dmgFile)
if options.compression is not None:
os.system('cp %s %s.temp.dmg' % (dmgFile, dmgFile))
os.remove(dmgFile)
if options.compression == "bz2":
os.system('hdiutil convert %s.temp.dmg -format UDBZ -o %s' %
(dmgFile, dmgFile))
elif options.compression == "gz":
os.system('hdiutil convert %s.temp.dmg -format ' % dmgFile +
'UDZO -imagekey zlib-devel=9 -o %s' % dmgFile)
os.remove('%s.temp.dmg' % dmgFile)
if ret == 0:
print "Successfully added license to '%s'" % dmgFile
else:
print "Failed to add license to '%s'" % dmgFile
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.set_usage("""%prog <dmgFile> <licenseFile> [OPTIONS]
This program adds a software license agreement to a DMG file.
It requires Xcode and either a plain ascii text <licenseFile>
or a <licenseFile.rtf> with the RTF contents.
See --help for more details.""")
parser.add_option(
'--rez',
'-r',
action='store',
default='/Applications/Xcode.app/Contents/Developer/Tools/Rez',
help='The path to the Rez tool. Defaults to %default'
)
parser.add_option(
'--compression',
'-c',
action='store',
choices=['bz2', 'gz'],
default=None,
help='Optionally compress dmg using specified compression type. '
'Choices are bz2 and gz.'
)
options, args = parser.parse_args()
cond = len(args) != 2
if not os.path.exists(options.rez):
print 'Failed to find Rez at "%s"!\n' % options.rez
cond = True
if cond:
parser.print_usage()
sys.exit(1)
main(options, args)
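    # Example invocation (illustrative file names):
    #   python dmg-license.py MyApp.dmg LICENSE.txt --compression bz2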
| gpl-2.0 | -6,955,224,997,652,710,000 | 37.355828 | 79 | 0.599488 | false |
pigeonflight/strider-plone | docker/appengine/lib/google-api-python-client/oauth2client/django_orm.py | 25 | 3255 | # Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
__metaclass__ = models.SubfieldBase
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if not value:
return None
if isinstance(value, oauth2client.client.Credentials):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
return base64.b64encode(pickle.dumps(value))
class FlowField(models.Field):
__metaclass__ = models.SubfieldBase
def get_internal_type(self):
return "TextField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, oauth2client.client.Flow):
return value
return pickle.loads(base64.b64decode(value))
def get_db_prep_value(self, value, connection, prepared=False):
return base64.b64encode(pickle.dumps(value))
class Storage(BaseStorage):
"""Store and retrieve a single credential to and from
the datastore.
This Storage helper presumes the Credentials
have been stored as a CredenialsField
on a db model class.
"""
def __init__(self, model_class, key_name, key_value, property_name):
"""Constructor for Storage.
    Args:
      model_class: django.db.models.Model, model class
      key_name: string, key name for the entity that has the credentials
      key_value: string, key value for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsField
    """
self.model_class = model_class
self.key_name = key_name
self.key_value = key_value
self.property_name = property_name
def locked_get(self):
"""Retrieve Credential from datastore.
Returns:
oauth2client.Credentials
"""
credential = None
query = {self.key_name: self.key_value}
entities = self.model_class.objects.filter(**query)
if len(entities) > 0:
credential = getattr(entities[0], self.property_name)
if credential and hasattr(credential, 'set_store'):
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a Credentials to the datastore.
Args:
credentials: Credentials, the credentials to store.
"""
args = {self.key_name: self.key_value}
entity = self.model_class(**args)
setattr(entity, self.property_name, credentials)
entity.save()
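# Usage sketch: 'CredentialsModel' and its fields below are illustrative
# assumptions, not part of this module.
#
#   class CredentialsModel(models.Model):
#       id = models.ForeignKey(User, primary_key=True)
#       credential = CredentialsField()
#
#   storage = Storage(CredentialsModel, 'id', user, 'credential')
#   credential = storage.get()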
| mit | 5,101,606,434,236,320,000 | 27.552632 | 80 | 0.709985 | false |
CenterForOpenScience/SHARE | share/transformers/com_peerj.py | 2 | 2280 | from share.transform.chain import ChainTransformer, Parser, Delegate, RunPython, ParseDate, ParseName, Map, ctx, Try, Subjects, IRI, Concat
class Subject(Parser):
name = ctx
class ThroughSubjects(Parser):
subject = Delegate(Subject, ctx)
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = Delegate(Tag, ctx)
class Organization(Parser):
name = ctx
class Publisher(Parser):
agent = Delegate(Organization, ctx)
class Person(Parser):
given_name = ParseName(ctx).first
family_name = ParseName(ctx).last
class Creator(Parser):
agent = Delegate(Person, ctx)
cited_as = ctx
order_cited = ctx('index')
class WorkIdentifier(Parser):
uri = IRI(ctx)
class Article(Parser):
title = ctx.title
description = Try(ctx.description)
language = ctx.language
date_published = ParseDate(ctx.date)
date_updated = ParseDate(ctx.date)
identifiers = Map(
Delegate(WorkIdentifier),
ctx.doi,
ctx.pdf_url,
ctx.fulltext_html_url,
RunPython(lambda x: 'https://www.ncbi.nlm.nih.gov/pubmed/{}'.format(x) if x else None, Try(ctx.identifiers.pubmed)),
RunPython(lambda x: 'https://www.ncbi.nlm.nih.gov/pmc/articles/{}'.format(x) if x else None, Try(ctx.identifiers.pmc)),
)
subjects = Map(Delegate(ThroughSubjects), Subjects(ctx.subjects))
tags = Map(Delegate(ThroughTags), Try(ctx.keywords), Try(ctx.subjects))
related_agents = Concat(
Map(Delegate(Creator), ctx.author),
Map(Delegate(Publisher), ctx.publisher),
)
class Extra:
volume = Try(ctx.volume)
journal_title = Try(ctx.journal_title)
journal_abbrev = Try(ctx.journal_abbrev)
description_html = Try(ctx['description-html'])
issn = Try(ctx.issn)
class Preprint(Article):
class Extra:
modified = ParseDate(ctx.date)
subjects = ctx.subjects
identifiers = Try(ctx.identifiers)
emails = Try(ctx.author_email)
description_html = Try(ctx['description-html'])
class PeerJTransformer(ChainTransformer):
VERSION = 1
def get_root_parser(self, unwrapped, emitted_type=None, **kwargs):
if emitted_type == 'preprint':
return Preprint
return Article
| apache-2.0 | 4,054,376,328,605,593,000 | 24.054945 | 139 | 0.657018 | false |
neilpelow/wmap-django | venv/lib/python3.5/site-packages/pip/_vendor/html5lib/constants.py | 354 | 83387 | from __future__ import absolute_import, division, unicode_literals
import string
EOF = None
E = {
"null-character":
"Null character in input stream, replaced with U+FFFD.",
"invalid-codepoint":
"Invalid codepoint in stream.",
"incorrectly-placed-solidus":
"Solidus (/) incorrectly placed in tag.",
"incorrect-cr-newline-entity":
"Incorrect CR newline entity, replaced with LF.",
"illegal-windows-1252-entity":
"Entity used with illegal number (windows-1252 reference).",
"cant-convert-numeric-entity":
"Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x).",
"illegal-codepoint-for-numeric-entity":
"Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x.",
"numeric-entity-without-semicolon":
"Numeric entity didn't end with ';'.",
"expected-numeric-entity-but-got-eof":
"Numeric entity expected. Got end of file instead.",
"expected-numeric-entity":
"Numeric entity expected but none found.",
"named-entity-without-semicolon":
"Named entity didn't end with ';'.",
"expected-named-entity":
"Named entity expected. Got none.",
"attributes-in-end-tag":
"End tag contains unexpected attributes.",
'self-closing-flag-on-end-tag':
"End tag contains unexpected self-closing flag.",
"expected-tag-name-but-got-right-bracket":
"Expected tag name. Got '>' instead.",
"expected-tag-name-but-got-question-mark":
"Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)",
"expected-tag-name":
"Expected tag name. Got something else instead",
"expected-closing-tag-but-got-right-bracket":
"Expected closing tag. Got '>' instead. Ignoring '</>'.",
"expected-closing-tag-but-got-eof":
"Expected closing tag. Unexpected end of file.",
"expected-closing-tag-but-got-char":
"Expected closing tag. Unexpected character '%(data)s' found.",
"eof-in-tag-name":
"Unexpected end of file in the tag name.",
"expected-attribute-name-but-got-eof":
"Unexpected end of file. Expected attribute name instead.",
"eof-in-attribute-name":
"Unexpected end of file in attribute name.",
"invalid-character-in-attribute-name":
"Invalid character in attribute name",
"duplicate-attribute":
"Dropped duplicate attribute on tag.",
"expected-end-of-tag-name-but-got-eof":
"Unexpected end of file. Expected = or end of tag.",
"expected-attribute-value-but-got-eof":
"Unexpected end of file. Expected attribute value.",
"expected-attribute-value-but-got-right-bracket":
"Expected attribute value. Got '>' instead.",
'equals-in-unquoted-attribute-value':
"Unexpected = in unquoted attribute",
'unexpected-character-in-unquoted-attribute-value':
"Unexpected character in unquoted attribute",
"invalid-character-after-attribute-name":
"Unexpected character after attribute name.",
"unexpected-character-after-attribute-value":
"Unexpected character after attribute value.",
"eof-in-attribute-value-double-quote":
"Unexpected end of file in attribute value (\").",
"eof-in-attribute-value-single-quote":
"Unexpected end of file in attribute value (').",
"eof-in-attribute-value-no-quotes":
"Unexpected end of file in attribute value.",
"unexpected-EOF-after-solidus-in-tag":
"Unexpected end of file in tag. Expected >",
"unexpected-character-after-solidus-in-tag":
"Unexpected character after / in tag. Expected >",
"expected-dashes-or-doctype":
"Expected '--' or 'DOCTYPE'. Not found.",
"unexpected-bang-after-double-dash-in-comment":
"Unexpected ! after -- in comment",
"unexpected-space-after-double-dash-in-comment":
"Unexpected space after -- in comment",
"incorrect-comment":
"Incorrect comment.",
"eof-in-comment":
"Unexpected end of file in comment.",
"eof-in-comment-end-dash":
"Unexpected end of file in comment (-)",
"unexpected-dash-after-double-dash-in-comment":
"Unexpected '-' after '--' found in comment.",
"eof-in-comment-double-dash":
"Unexpected end of file in comment (--).",
"eof-in-comment-end-space-state":
"Unexpected end of file in comment.",
"eof-in-comment-end-bang-state":
"Unexpected end of file in comment.",
"unexpected-char-in-comment":
"Unexpected character in comment found.",
"need-space-after-doctype":
"No space after literal string 'DOCTYPE'.",
"expected-doctype-name-but-got-right-bracket":
"Unexpected > character. Expected DOCTYPE name.",
"expected-doctype-name-but-got-eof":
"Unexpected end of file. Expected DOCTYPE name.",
"eof-in-doctype-name":
"Unexpected end of file in DOCTYPE name.",
"eof-in-doctype":
"Unexpected end of file in DOCTYPE.",
"expected-space-or-right-bracket-in-doctype":
"Expected space or '>'. Got '%(data)s'",
"unexpected-end-of-doctype":
"Unexpected end of DOCTYPE.",
"unexpected-char-in-doctype":
"Unexpected character in DOCTYPE.",
"eof-in-innerhtml":
"XXX innerHTML EOF",
"unexpected-doctype":
"Unexpected DOCTYPE. Ignored.",
"non-html-root":
"html needs to be the first start tag.",
"expected-doctype-but-got-eof":
"Unexpected End of file. Expected DOCTYPE.",
"unknown-doctype":
"Erroneous DOCTYPE.",
"expected-doctype-but-got-chars":
"Unexpected non-space characters. Expected DOCTYPE.",
"expected-doctype-but-got-start-tag":
"Unexpected start tag (%(name)s). Expected DOCTYPE.",
"expected-doctype-but-got-end-tag":
"Unexpected end tag (%(name)s). Expected DOCTYPE.",
"end-tag-after-implied-root":
"Unexpected end tag (%(name)s) after the (implied) root element.",
"expected-named-closing-tag-but-got-eof":
"Unexpected end of file. Expected end tag (%(name)s).",
"two-heads-are-not-better-than-one":
"Unexpected start tag head in existing head. Ignored.",
"unexpected-end-tag":
"Unexpected end tag (%(name)s). Ignored.",
"unexpected-start-tag-out-of-my-head":
"Unexpected start tag (%(name)s) that can be in head. Moved.",
"unexpected-start-tag":
"Unexpected start tag (%(name)s).",
"missing-end-tag":
"Missing end tag (%(name)s).",
"missing-end-tags":
"Missing end tags (%(name)s).",
"unexpected-start-tag-implies-end-tag":
"Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s).",
"unexpected-start-tag-treated-as":
"Unexpected start tag (%(originalName)s). Treated as %(newName)s.",
"deprecated-tag":
"Unexpected start tag %(name)s. Don't use it!",
"unexpected-start-tag-ignored":
"Unexpected start tag %(name)s. Ignored.",
"expected-one-end-tag-but-got-another":
"Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s).",
"end-tag-too-early":
"End tag (%(name)s) seen too early. Expected other end tag.",
"end-tag-too-early-named":
"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).",
"end-tag-too-early-ignored":
"End tag (%(name)s) seen too early. Ignored.",
"adoption-agency-1.1":
"End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm.",
"adoption-agency-1.2":
"End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm.",
"adoption-agency-1.3":
"End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm.",
"adoption-agency-4.4":
"End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm.",
"unexpected-end-tag-treated-as":
"Unexpected end tag (%(originalName)s). Treated as %(newName)s.",
"no-end-tag":
"This element (%(name)s) has no end tag.",
"unexpected-implied-end-tag-in-table":
"Unexpected implied end tag (%(name)s) in the table phase.",
"unexpected-implied-end-tag-in-table-body":
"Unexpected implied end tag (%(name)s) in the table body phase.",
"unexpected-char-implies-table-voodoo":
"Unexpected non-space characters in "
"table context caused voodoo mode.",
"unexpected-hidden-input-in-table":
"Unexpected input with type hidden in table context.",
"unexpected-form-in-table":
"Unexpected form in table context.",
"unexpected-start-tag-implies-table-voodoo":
"Unexpected start tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-end-tag-implies-table-voodoo":
"Unexpected end tag (%(name)s) in "
"table context caused voodoo mode.",
"unexpected-cell-in-table-body":
"Unexpected table cell start tag (%(name)s) "
"in the table body phase.",
"unexpected-cell-end-tag":
"Got table cell end tag (%(name)s) "
"while required end tags are missing.",
"unexpected-end-tag-in-table-body":
"Unexpected end tag (%(name)s) in the table body phase. Ignored.",
"unexpected-implied-end-tag-in-table-row":
"Unexpected implied end tag (%(name)s) in the table row phase.",
"unexpected-end-tag-in-table-row":
"Unexpected end tag (%(name)s) in the table row phase. Ignored.",
"unexpected-select-in-select":
"Unexpected select start tag in the select phase "
"treated as select end tag.",
"unexpected-input-in-select":
"Unexpected input start tag in the select phase.",
"unexpected-start-tag-in-select":
"Unexpected start tag token (%(name)s in the select phase. "
"Ignored.",
"unexpected-end-tag-in-select":
"Unexpected end tag (%(name)s) in the select phase. Ignored.",
"unexpected-table-element-start-tag-in-select-in-table":
"Unexpected table element start tag (%(name)s) in the select in table phase.",
"unexpected-table-element-end-tag-in-select-in-table":
"Unexpected table element end tag (%(name)s) in the select in table phase.",
"unexpected-char-after-body":
"Unexpected non-space characters in the after body phase.",
"unexpected-start-tag-after-body":
"Unexpected start tag token (%(name)s)"
" in the after body phase.",
"unexpected-end-tag-after-body":
"Unexpected end tag token (%(name)s)"
" in the after body phase.",
"unexpected-char-in-frameset":
"Unexpected characters in the frameset phase. Characters ignored.",
"unexpected-start-tag-in-frameset":
"Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-frameset-in-frameset-innerhtml":
"Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML).",
"unexpected-end-tag-in-frameset":
"Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored.",
"unexpected-char-after-frameset":
"Unexpected non-space characters in the "
"after frameset phase. Ignored.",
"unexpected-start-tag-after-frameset":
"Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-frameset":
"Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored.",
"unexpected-end-tag-after-body-innerhtml":
"Unexpected end tag after body(innerHtml)",
"expected-eof-but-got-char":
"Unexpected non-space characters. Expected end of file.",
"expected-eof-but-got-start-tag":
"Unexpected start tag (%(name)s)"
". Expected end of file.",
"expected-eof-but-got-end-tag":
"Unexpected end tag (%(name)s)"
". Expected end of file.",
"eof-in-table":
"Unexpected end of file. Expected table content.",
"eof-in-select":
"Unexpected end of file. Expected select content.",
"eof-in-frameset":
"Unexpected end of file. Expected frameset content.",
"eof-in-script-in-script":
"Unexpected end of file. Expected script content.",
"eof-in-foreign-lands":
"Unexpected end of file. Expected foreign content",
"non-void-element-with-trailing-solidus":
"Trailing solidus not allowed on element %(name)s",
"unexpected-html-element-in-foreign-content":
"Element %(name)s not allowed in a non-html context",
"unexpected-end-tag-before-html":
"Unexpected end tag (%(name)s) before html.",
"unexpected-inhead-noscript-tag":
"Element %(name)s not allowed in a inhead-noscript context",
"eof-in-head-noscript":
"Unexpected end of file. Expected inhead-noscript content",
"char-in-head-noscript":
"Unexpected non-space character. Expected inhead-noscript content",
"XXX-undefined-error":
"Undefined error (this sucks and should be fixed)",
}
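# Usage sketch (hedged): each value above is a %-style format string keyed
# by error code, and the parser formats it with a dict of datavars, roughly:
#   E["unexpected-start-tag"] % {"name": "div"}
#   # -> "Unexpected start tag (div)."
# (E is the name this table is bound to at its definition above.)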
namespaces = {
"html": "http://www.w3.org/1999/xhtml",
"mathml": "http://www.w3.org/1998/Math/MathML",
"svg": "http://www.w3.org/2000/svg",
"xlink": "http://www.w3.org/1999/xlink",
"xml": "http://www.w3.org/XML/1998/namespace",
"xmlns": "http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset([
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
])
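# Scope checks use (namespace, local-name) pairs; a hedged sketch of the
# membership test the tree builder performs while walking the stack of
# open elements:
#   (namespaces["html"], "table") in scopingElements   # True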
formattingElements = frozenset([
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
])
specialElements = frozenset([
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
])
htmlIntegrationPointElements = frozenset([
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
])
mathmlTextIntegrationPointElements = frozenset([
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
])
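# Integration points mark where regular HTML parsing rules resume inside
# foreign (SVG/MathML) subtrees: the parser checks the current node against
# these (namespace, local-name) sets before dispatching a token.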
adjustSVGAttributes = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
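# The tokenizer lowercases attribute names, so for SVG content the tree
# builder uses this map to restore the spec-defined mixed case, e.g. a
# parsed "viewbox" is emitted as "viewBox"; adjustMathMLAttributes below
# does the same for MathML's single mixed-case attribute.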
adjustMathMLAttributes = {"definitionurl": "definitionURL"}
adjustForeignAttributes = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
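# Each value above is a (prefix, local-name, namespace) triple. The inverse
# map built below lets a serializer recover the prefixed qualified name; a
# hedged sketch:
#   unadjustForeignAttributes[(namespaces["xlink"], "href")]  # "xlink:href"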
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
adjustForeignAttributes.items()])
spaceCharacters = frozenset([
"\t",
"\n",
"\u000C",
" ",
"\r"
])
tableInsertModeElements = frozenset([
"table",
"tbody",
"tfoot",
"thead",
"tr"
])
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
for c in string.ascii_uppercase])
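# asciiUpper2Lower is an ordinal translation table for str.translate() on
# text strings, giving ASCII-only lowercasing (non-ASCII code points pass
# through unchanged), e.g.:
#   "DIV".translate(asciiUpper2Lower)  # "div"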
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
voidElements = frozenset([
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
])
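# Void elements never take an end tag; serializers emit only the start tag
# for these. (The set keeps a few historic names such as "event-source".)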
cdataElements = frozenset(['title', 'textarea'])
rcdataElements = frozenset([
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
])
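# Naming note (hedged): these two names are historical. In current spec
# terms, cdataElements hold "escapable raw text" (RCDATA) content and
# rcdataElements hold raw text content; both switch the tokenizer out of
# the normal data state.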
booleanAttributes = {
"": frozenset(["irrelevant"]),
"style": frozenset(["scoped"]),
"img": frozenset(["ismap"]),
"audio": frozenset(["autoplay", "controls"]),
"video": frozenset(["autoplay", "controls"]),
"script": frozenset(["defer", "async"]),
"details": frozenset(["open"]),
"datagrid": frozenset(["multiple", "disabled"]),
"command": frozenset(["hidden", "disabled", "checked", "default"]),
"hr": frozenset(["noshade"]),
"menu": frozenset(["autosubmit"]),
"fieldset": frozenset(["disabled", "readonly"]),
"option": frozenset(["disabled", "readonly", "selected"]),
"optgroup": frozenset(["disabled", "readonly"]),
"button": frozenset(["disabled", "autofocus"]),
"input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]),
"select": frozenset(["disabled", "readonly", "autofocus", "multiple"]),
"output": frozenset(["disabled", "readonly"]),
}
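# Hedged note: the "" key holds boolean attributes that apply to every
# element; a serializer consults both the per-tag set and this global set
# when deciding whether an attribute's value can be minimized away.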
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
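# Lookup sketch: numeric character references in the 0x80-0x9F range are
# remapped through this tuple by offset, e.g. for &#x93;:
#   entitiesWindows1252[0x93 - 0x80]  # 8220, LEFT DOUBLE QUOTATION MARK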
xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;'])
entities = {
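    # Names without a trailing ";" are the legacy entities that may be
    # recognized even when the semicolon is omitted (a fixed, spec-defined
    # subset); each of them also appears in its ";"-terminated form.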
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
replacementCharacters = {
0x0: "\uFFFD",
0x0d: "\u000D",
0x80: "\u20AC",
0x81: "\u0081",
0x82: "\u201A",
0x83: "\u0192",
0x84: "\u201E",
0x85: "\u2026",
0x86: "\u2020",
0x87: "\u2021",
0x88: "\u02C6",
0x89: "\u2030",
0x8A: "\u0160",
0x8B: "\u2039",
0x8C: "\u0152",
0x8D: "\u008D",
0x8E: "\u017D",
0x8F: "\u008F",
0x90: "\u0090",
0x91: "\u2018",
0x92: "\u2019",
0x93: "\u201C",
0x94: "\u201D",
0x95: "\u2022",
0x96: "\u2013",
0x97: "\u2014",
0x98: "\u02DC",
0x99: "\u2122",
0x9A: "\u0161",
0x9B: "\u203A",
0x9C: "\u0153",
0x9D: "\u009D",
0x9E: "\u017E",
0x9F: "\u0178",
}
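# The replacementCharacters table above maps the code points of ill-formed
# numeric character references (NUL, CR and the windows-1252/C1 range) to
# the characters that browsers actually produce: for example "&#150;"
# (0x96) decodes to "\u2013" (EN DASH) rather than a C1 control character.
# A minimal sketch of the lookup, for illustration only (the real
# tokenizer also range-checks the code point and records a parse error):
#
#     def decode_numeric_reference(codepoint):
#         return replacementCharacters.get(codepoint, chr(codepoint))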
tokenTypes = {
"Doctype": 0,
"Characters": 1,
"SpaceCharacters": 2,
"StartTag": 3,
"EndTag": 4,
"EmptyTag": 5,
"Comment": 6,
"ParseError": 7
}
tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]])
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
pass
class ReparseException(Exception):
pass
| gpl-3.0 | 3,670,396,410,211,068,400 | 27.314771 | 94 | 0.503999 | false |
PrashntS/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 103 | 4394 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition
(dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` to the :ref:`olivetti_faces` dataset
(see the documentation chapter :ref:`decompositions`).
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause | 6,947,843,777,531,474,000 | 32.287879 | 79 | 0.572371 | false |
emCOMP/twinkle | twinkle/feature_extraction/pipelines.py | 1 | 2688 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from core import FeatureExtractorRegistry
from twinkle.connectors.core import ConnectorRegistry
class FeatureExtractorPipelineFactory(object):
"""
Factory object for creating a pipeline from a file
"""
def __init__(self):
"""
"""
pass
def buildInput(self, config_data):
"""
builds an input from the ConnectorRegistry
"""
input_name = config_data["name"]
input_config = config_data["config"]
return ConnectorRegistry.buildConnector(input_name, input_config)
def buildOutput(self, config_data):
"""
		builds an output from the ConnectorRegistry
"""
output_name = config_data["name"]
output_config = config_data["config"]
return ConnectorRegistry.buildConnector(output_name, output_config)
def buildExtractor(self, config_data):
"""
"""
extractor_name = config_data["name"]
extractor_config = config_data["config"]
return FeatureExtractorRegistry.buildExtractor(extractor_name, extractor_config)
	def buildFromDictionary(self, config_data):
		"""
		builds a complete pipeline (input, output and extractors) from
		a configuration dictionary
		"""
if "input" not in config_data:
raise Exception("No input source was specified in the configuration data")
if "output" not in config_data:
raise Exception("No output source was specified in the configuration data")
#build input
input_data = config_data["input"]
input = self.buildInput(input_data)
# build output
output_data = config_data["output"]
output = self.buildOutput(output_data)
# create the pipeline
pipeline = FeatureExtractorPipeline(input, output)
# get feature extractors
extractors = config_data["extractors"]
# add each extractor
for extractor_config in extractors:
extractor = self.buildExtractor(extractor_config)
pipeline.addExtractor(extractor)
return pipeline
class FeatureExtractorPipeline(object):
"""
	Simple feature extractor pipeline.
	It still needs several features, such as dependency graphs to resolve
	some of the intermediates, and the ability to do second passes for
	items which need to be normalized.
"""
def __init__(self, input, output):
self.feature_extractors = []
self.input = input
self.output = output
def addExtractor(self, extractor):
"""
add Extractor to the pipeline
"""
self.feature_extractors.append(extractor)
def run(self):
"""
runs the pipeline
"""
processed_items = []
# iterate through each item
for item in self.input:
item_cookie = { "tweet": item, "text": item.text}
output = {}
# first do preprossing
for extractor in self.feature_extractors:
extractor.extract(item, item_cookie, output)
print output
# write output
self.output.write(output)
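# A minimal usage sketch (the connector and extractor names below --
# "csv_reader", "csv_writer" and "tweet_length" -- are hypothetical;
# use whatever names are actually registered with ConnectorRegistry
# and FeatureExtractorRegistry in your deployment):
#
# config = {
#     "input": {"name": "csv_reader", "config": {"path": "tweets.csv"}},
#     "output": {"name": "csv_writer", "config": {"path": "features.csv"}},
#     "extractors": [
#         {"name": "tweet_length", "config": {}},
#     ],
# }
# pipeline = FeatureExtractorPipelineFactory().buildFromDictionary(config)
# pipeline.run()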
| mit | -8,306,893,356,912,068,000 | 19.363636 | 101 | 0.71131 | false |
wfxiang08/django178 | tests/inspectdb/tests.py | 6 | 11721 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from unittest import skipUnless
from django.core.management import call_command
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.utils.six import PY3, StringIO
class InspectDBTestCase(TestCase):
def test_stealth_table_name_filter_option(self):
out = StringIO()
# Lets limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
error_message = "inspectdb has examined a table that should have been filtered out."
# contrib.contenttypes is one of the apps always installed when running
# the Django test suite, check that one of its tables hasn't been
# inspected
self.assertNotIn("class DjangoContentType(models.Model):", out.getvalue(), msg=error_message)
def make_field_type_asserter(self):
"""Call inspectdb and return a function to validate a field type in its output"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
def assertFieldType(name, definition):
out_def = re.search(r'^\s*%s = (models.*)$' % name, output, re.MULTILINE).groups()[0]
self.assertEqual(definition, out_def)
return assertFieldType
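    # For example, if inspectdb emitted the line
    #     char_field = models.CharField(max_length=10)
    # then assertFieldType('char_field', "models.CharField(max_length=10)")
    # passes, while any other definition string fails the assertEqual.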
def test_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
# Inspecting Oracle DB doesn't produce correct results (#19884):
# - it gets max_length wrong: it returns a number of bytes.
# - it reports fields as blank=True when they aren't.
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('char_field', "models.CharField(max_length=10)")
assertFieldType('comma_separated_int_field', "models.CharField(max_length=99)")
assertFieldType('date_field', "models.DateField()")
assertFieldType('date_time_field', "models.DateTimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('email_field', "models.CharField(max_length=75)")
assertFieldType('file_field', "models.CharField(max_length=100)")
assertFieldType('file_path_field', "models.CharField(max_length=100)")
if connection.features.can_introspect_ip_address_field:
assertFieldType('ip_address_field', "models.GenericIPAddressField()")
assertFieldType('gen_ip_adress_field', "models.GenericIPAddressField()")
elif (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('ip_address_field', "models.CharField(max_length=15)")
assertFieldType('gen_ip_adress_field', "models.CharField(max_length=39)")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('slug_field', "models.CharField(max_length=50)")
if not connection.features.interprets_empty_strings_as_nulls:
assertFieldType('text_field', "models.TextField()")
if connection.features.can_introspect_time_field:
assertFieldType('time_field', "models.TimeField()")
if (connection.features.can_introspect_max_length and
not connection.features.interprets_empty_strings_as_nulls):
assertFieldType('url_field', "models.CharField(max_length=200)")
def test_number_field_types(self):
"""Test introspection of various Django field types"""
assertFieldType = self.make_field_type_asserter()
if not connection.features.can_introspect_autofield:
assertFieldType('id', "models.IntegerField(primary_key=True) # AutoField?")
if connection.features.can_introspect_big_integer_field:
assertFieldType('big_int_field', "models.BigIntegerField()")
else:
assertFieldType('big_int_field', "models.IntegerField()")
if connection.features.can_introspect_boolean_field:
assertFieldType('bool_field', "models.BooleanField()")
if connection.features.can_introspect_null:
assertFieldType('null_bool_field', "models.NullBooleanField()")
else:
assertFieldType('null_bool_field', "models.BooleanField()")
else:
assertFieldType('bool_field', "models.IntegerField()")
if connection.features.can_introspect_null:
assertFieldType('null_bool_field', "models.IntegerField(blank=True, null=True)")
else:
assertFieldType('null_bool_field', "models.IntegerField()")
if connection.features.can_introspect_decimal_field:
assertFieldType('decimal_field', "models.DecimalField(max_digits=6, decimal_places=1)")
else: # Guessed arguments on SQLite, see #5014
assertFieldType('decimal_field', "models.DecimalField(max_digits=10, decimal_places=5) "
"# max_digits and decimal_places have been guessed, "
"as this database handles decimal fields as float")
assertFieldType('float_field', "models.FloatField()")
assertFieldType('int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
assertFieldType('pos_int_field', "models.PositiveIntegerField()")
else:
assertFieldType('pos_int_field', "models.IntegerField()")
if connection.features.can_introspect_positive_integer_field:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.PositiveSmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.PositiveIntegerField()")
else:
if connection.features.can_introspect_small_integer_field:
assertFieldType('pos_small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('pos_small_int_field', "models.IntegerField()")
if connection.features.can_introspect_small_integer_field:
assertFieldType('small_int_field', "models.SmallIntegerField()")
else:
assertFieldType('small_int_field', "models.IntegerField()")
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_attribute_name_not_python_keyword(self):
out = StringIO()
# Lets limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated an attribute name which is a python keyword"
# Recursive foreign keys should be set to 'self'
self.assertIn("parent = models.ForeignKey('self')", output)
self.assertNotIn("from = models.ForeignKey(InspectdbPeople)", output, msg=error_message)
# As InspectdbPeople model is defined after InspectdbMessage, it should be quoted
self.assertIn("from_field = models.ForeignKey('InspectdbPeople', db_column='from_id')",
output)
self.assertIn("people_pk = models.ForeignKey(InspectdbPeople, primary_key=True)",
output)
self.assertIn("people_unique = models.ForeignKey(InspectdbPeople, unique=True)",
output)
def test_digits_column_name_introspection(self):
"""Introspection of column names consist/start with digits (#16536/#17676)"""
out = StringIO()
# Lets limit the introspection to tables created for models of this
# application
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_'),
stdout=out)
output = out.getvalue()
error_message = "inspectdb generated a model field name which is a number"
self.assertNotIn(" 123 = models.CharField", output, msg=error_message)
self.assertIn("number_123 = models.CharField", output)
error_message = "inspectdb generated a model field name which starts with a digit"
self.assertNotIn(" 4extra = models.CharField", output, msg=error_message)
self.assertIn("number_4extra = models.CharField", output)
self.assertNotIn(" 45extra = models.CharField", output, msg=error_message)
self.assertIn("number_45extra = models.CharField", output)
def test_special_column_name_introspection(self):
"""
Introspection of column names containing special characters,
unsuitable for Python identifiers
"""
out = StringIO()
call_command('inspectdb', stdout=out)
output = out.getvalue()
base_name = 'Field' if not connection.features.uppercases_column_names else 'field'
self.assertIn("field = models.IntegerField()", output)
self.assertIn("field_field = models.IntegerField(db_column='%s_')" % base_name, output)
self.assertIn("field_field_0 = models.IntegerField(db_column='%s__')" % base_name, output)
self.assertIn("field_field_1 = models.IntegerField(db_column='__field')", output)
self.assertIn("prc_x = models.IntegerField(db_column='prc(%) x')", output)
if PY3:
# Python 3 allows non-ASCII identifiers
self.assertIn("tamaño = models.IntegerField()", output)
else:
self.assertIn("tama_o = models.IntegerField(db_column='tama\\xf1o')", output)
def test_managed_models(self):
"""Test that by default the command generates models with `Meta.managed = False` (#14305)"""
out = StringIO()
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.longMessage = False
self.assertIn(" managed = False", output, msg='inspectdb should generate unmanaged models.')
@skipUnless(connection.vendor == 'sqlite',
"Only patched sqlite's DatabaseIntrospection.data_types_reverse for this test")
def test_custom_fields(self):
"""
Introspection of columns with a custom field (#21090)
"""
out = StringIO()
orig_data_types_reverse = connection.introspection.data_types_reverse
try:
connection.introspection.data_types_reverse = {
'text': 'myfields.TextField',
'bigint': 'BigIntegerField',
}
call_command('inspectdb',
table_name_filter=lambda tn: tn.startswith('inspectdb_columntypes'),
stdout=out)
output = out.getvalue()
self.assertIn("text_field = myfields.TextField()", output)
self.assertIn("big_int_field = models.BigIntegerField()", output)
finally:
connection.introspection.data_types_reverse = orig_data_types_reverse
| bsd-3-clause | 1,477,715,790,934,979,600 | 50.858407 | 107 | 0.638908 | false |
OpenUpgrade-dev/OpenUpgrade | addons/crm_helpdesk/report/__init__.py | 442 | 1083 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,441,730,170,282,298,000 | 42.32 | 79 | 0.611265 | false |
OpenSourcePolicyCenter/PolicyBrain | webapp/apps/register/tests.py | 2 | 2406 | from django.test import TestCase
import pytest
from django.test import Client
from django.core.urlresolvers import reverse
from webapp.apps.register.models import Subscriber
# Run these tests with `py.test -q webapp/apps/register/tests.py`
# (use `py.test --pdb -s -m register` to drop into the debugger on failure).
@pytest.mark.register
class RegisterTestCase(TestCase):
def test_initial_subscribe(self):
subscriber = Subscriber.objects.create(
email='test@example.com',
)
self.assertFalse(subscriber.active)
def test_post_registration_email(self):
c = Client()
response = c.post(reverse('about'),
{'email': 'afarrell+test1@continuum.io'})
subscriber = Subscriber.objects.get(
email='afarrell+test1@continuum.io')
self.assertFalse(subscriber.active)
def test_confirm_link_correct(self):
subscriber = Subscriber.objects.create(
email='test@example.com',
)
self.assertEqual(subscriber.confirm_url("http://ospc-taxes.org"),
("http://ospc-taxes.org/register/?k={}"
.format(subscriber.confirm_key)))
# Tests to write:
# User enters in a username that already exists, then changes it and
# resubmits
# User enters an email that is the same as an existing email.
# def test_patching_works(self):
# with patch('django.core.mail.send_mail') as mocked_send_mail:
# from django.core.mail import send_mail
# send_mail(subject="foo", message="bar",
# from_email="andrew <amfarrell@mit.edu>",
# recipient_list = ['farrell <afarrell@mit.edu>',])
# self.assertTrue(mocked_send_mail.called)
#
# def test_mail_is_sent(self):
# with patch('django.core.mail.send_mail') as mocked_send_mail:
# from webapp.apps.register.models import Subscriber
# subscriber = Subscriber.objects.create(
# email = 'test@example.com',
# )
# subscriber.save()
# self.assertFalse(subscriber.active)
# self.assertTrue(mocked_send_mail.called)
# self.assertEqual(mocked_send_mail.call_args['recipient_list'],
# 'test@example.com')
# self.assertIn(mocked_send_mail.call_args['message'],
# subscriber.confirm_key)
| mit | 4,057,671,585,503,592,400 | 37.806452 | 75 | 0.608063 | false |
stormi/tsunami | src/secondaires/crafting/fonctions/a_rang.py | 1 | 3868 | # -*-coding:Utf-8 -*
# Copyright (c) 2015 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction a_rang."""
from primaires.format.fonctions import supprimer_accents
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
"""Retourne vrai si le personnage a le rang indiqué dans la guilde."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.a_rang, "Personnage", "str")
cls.ajouter_types(cls.a_rang, "Personnage", "str", "str")
@staticmethod
def a_rang(personnage, cle_guilde, cle_rang=""):
"""Retourne vrai si le personnage est du rang indiqué.
Vous pouvez ne préciser que le personnage et la clé de la
guilde pour savoir si le personnage est membre de la guilde,
à quelque rang que ce soit. Consultez les exemples
ci-dessous. Si vous précisez une clé de rang en troisième
paramètre, cette fonction retourne vrai si le personnage est
de ce rang ou de rang supérieur.
Paramètres à préciser :
* personnage : le personnage (membre ou non)
* cle_guilde : la clé de la guilde (une chaîne)
* cle_rang (optionnel) : la clé de rang (une chaîne)
Exemples d'utilisation :
# On admet une guilde 'forgerons' avec les rangs suivants :
# - apprenti
# - compagnon
# - maitre
si a_rang(personnage, "forgerons"):
# personnage est membre de la guilde
sinon:
# personnage n'est ni apprenti, ni compagnon, ni maître
finsi
si a_rang(personnage, "forgerons", "compagnon"):
# personnage est soit compagnon soit maitre
...
finsi
"""
cle_guilde = cle_guilde.lower()
if cle_guilde not in importeur.crafting.guildes:
raise ErreurExecution("La guilde {} n'existe pas".format(
repr(cle_guilde)))
guilde = importeur.crafting.guildes[cle_guilde]
if personnage not in guilde.membres:
return False
if cle_rang == "":
return personnage in guilde.membres
rang = guilde.get_rang(cle_rang)
à_rang = guilde.membres[personnage].rang
return rang in p_rang.rangs_parents
| bsd-3-clause | -1,823,801,531,649,955,300 | 39.515789 | 79 | 0.686672 | false |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/tests/test_binaries.py | 1 | 40028 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import logging
import os
import random
import tempfile
import unittest
import torch
from fairseq import options
from fairseq_cli import train
from fairseq_cli import eval_lm
from fairseq_cli import validate
from tests.utils import (
create_dummy_data,
preprocess_lm_data,
preprocess_translation_data,
train_translation_model,
generate_main,
)
class TestTranslation(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir)
def test_raw(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])
generate_main(data_dir, ['--dataset-impl', 'raw'])
def test_update_freq(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
generate_main(data_dir)
def test_max_positions(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
with self.assertRaises(Exception) as context:
train_translation_model(
data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'],
)
self.assertTrue(
'skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)
)
train_translation_model(
data_dir, 'fconv_iwslt_de_en',
['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'],
)
with self.assertRaises(Exception) as context:
generate_main(data_dir)
generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])
def test_generation(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_sampling') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir, [
'--sampling',
'--temperature', '2',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, [
'--sampling',
'--sampling-topk', '3',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, [
'--sampling',
'--sampling-topp', '0.2',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, [
'--diversity-rate', '0.5',
'--beam', '6',
])
with self.assertRaises(ValueError):
generate_main(data_dir, [
'--diverse-beam-groups', '4',
'--match-source-len',
])
generate_main(data_dir, ['--prefix-size', '2'])
generate_main(data_dir, ['--retain-dropout'])
def test_eval_bleu(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', [
'--eval-bleu',
'--eval-bleu-print-samples',
'--eval-bleu-remove-bpe',
'--eval-bleu-detok', 'space',
'--eval-bleu-args', '{"beam": 4, "min_len": 10}',
])
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--decoder-out-embed-dim', '8',
])
generate_main(data_dir)
def test_lstm_bidirectional(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm', [
'--encoder-layers', '2',
'--encoder-bidirectional',
'--encoder-hidden-size', '16',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--decoder-out-embed-dim', '8',
'--decoder-layers', '2',
])
generate_main(data_dir)
def test_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', [
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
], run_validation=True)
generate_main(data_dir)
def test_multilingual_transformer(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
decoder_langtok_flags = [[], ['--decoder-langtok']]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
arch='multilingual_transformer',
task='multilingual_translation',
extra_flags=[
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
] + enc_ltok_flag + dec_ltok_flag,
lang_flags=['--lang-pairs', 'in-out,out-in'],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
'--task', 'multilingual_translation',
'--lang-pairs', 'in-out,out-in',
'--source-lang', 'in',
'--target-lang', 'out',
] + enc_ltok_flag + dec_ltok_flag,
)
def test_translation_multi_simple_epoch(self):
# test with all combinations of encoder/decoder lang tokens
encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
decoder_langtok_flags = [[], ['--decoder-langtok']]
with contextlib.redirect_stdout(StringIO()):
for i in range(len(encoder_langtok_flags)):
for j in range(len(decoder_langtok_flags)):
enc_ltok_flag = encoder_langtok_flags[i]
dec_ltok_flag = decoder_langtok_flags[j]
with tempfile.TemporaryDirectory(f'test_translation_multi_simple_epoch_{i}_{j}') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(
data_dir,
arch='transformer',
task='translation_multi_simple_epoch',
extra_flags=[
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--sampling-method', 'temperature',
'--sampling-temperature', '1.5',
'--virtual-epoch-size', '1000',
] + enc_ltok_flag + dec_ltok_flag,
lang_flags=['--lang-pairs', 'in-out,out-in'],
run_validation=True,
extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
)
generate_main(
data_dir,
extra_flags=[
'--task', 'translation_multi_simple_epoch',
'--lang-pairs', 'in-out,out-in',
'--source-lang', 'in',
'--target-lang', 'out',
] + enc_ltok_flag + dec_ltok_flag,
)
def test_transformer_cross_self_attention(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', [
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--no-cross-attention',
'--cross-self-attention',
], run_validation=True)
generate_main(data_dir, extra_flags=[])
def test_lightconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lightconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', [
'--encoder-conv-type', 'lightweight',
'--decoder-conv-type', 'lightweight',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
])
generate_main(data_dir)
def test_dynamicconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', [
'--encoder-conv-type', 'dynamic',
'--decoder-conv-type', 'dynamic',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
])
generate_main(data_dir)
def test_cmlm_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'cmlm_transformer', [
'--apply-bert-init',
'--criterion', 'nat_loss',
'--noise', 'full_mask',
'--pred-length-offset',
'--length-loss-factor', '0.1'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'nonautoregressive_transformer', [
'--apply-bert-init', '--src-embedding-copy', '--criterion',
'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
'--length-loss-factor', '0.1'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '0',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
# def test_nat_crf_transformer(self):
# with contextlib.redirect_stdout(StringIO()):
# with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir:
# create_dummy_data(data_dir)
# preprocess_translation_data(data_dir, ['--joined-dictionary'])
# train_translation_model(data_dir, 'nacrf_transformer', [
# '--apply-bert-init', '--criterion',
# 'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
# '--length-loss-factor', '0.1',
# '--word-ins-loss-factor', '0.5',
# '--crf-lowrank-approx', '1',
# '--crf-beam-approx', '1'
# ], task='translation_lev')
# generate_main(data_dir, [
# '--task', 'translation_lev',
# '--iter-decode-max-iter', '0',
# '--iter-decode-eos-penalty', '0',
# '--print-step',
# ])
def test_iterative_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', [
'--apply-bert-init', '--src-embedding-copy', '--criterion',
'nat_loss', '--noise', 'full_mask', '--stochastic-approx',
'--dae-ratio', '0.5', '--train-step', '3'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_insertion_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'insertion_transformer', [
'--apply-bert-init', '--criterion', 'nat_loss', '--noise',
'random_mask'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_mixture_of_experts(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_moe') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', [
'--task', 'translation_moe',
'--user-dir', 'examples/translation_moe/src',
'--method', 'hMoElp',
'--mean-pool-gating-network',
'--num-experts', '3',
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
])
generate_main(data_dir, [
'--task', 'translation_moe',
'--user-dir', 'examples/translation_moe/src',
'--method', 'hMoElp',
'--mean-pool-gating-network',
'--num-experts', '3',
'--gen-expert', '0'
])
def test_alignment(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_alignment') as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
train_translation_model(
data_dir,
'transformer_align',
[
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--load-alignments',
'--alignment-layer', '1',
'--criterion', 'label_smoothed_cross_entropy_with_alignment'
],
run_validation=True,
)
generate_main(data_dir)
class TestStories(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_self_att_wp(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
config = [
'--encoder-layers', '[(128, 3)] * 2',
'--decoder-layers', '[(128, 3)] * 2',
'--decoder-attention', 'True',
'--encoder-attention', 'False',
'--gated-attention', 'True',
'--self-attention', 'True',
'--project-input', 'True',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--decoder-out-embed-dim', '8',
'--multihead-self-attention-nheads', '2'
]
train_translation_model(data_dir, 'fconv_self_att_wp', config)
generate_main(data_dir)
# fusion model
os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt'))
config.extend([
'--pretrained', 'True',
'--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'),
'--save-dir', os.path.join(data_dir, 'fusion_model'),
])
train_translation_model(data_dir, 'fconv_self_att_wp', config)
class TestLanguageModeling(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'fconv_lm', [
'--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
'--decoder-embed-dim', '280',
'--optimizer', 'nag',
'--lr', '0.1',
])
eval_lm_main(data_dir)
generate_main(data_dir, [
'--task', 'language_modeling',
'--sample-break-mode', 'eos',
'--tokens-per-sample', '500',
])
def test_transformer_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True,
)
eval_lm_main(data_dir)
generate_main(data_dir, [
'--task', 'language_modeling',
'--sample-break-mode', 'eos',
'--tokens-per-sample', '500',
])
def test_lightconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True,
)
eval_lm_main(data_dir)
generate_main(data_dir, [
'--task', 'language_modeling',
'--sample-break-mode', 'eos',
'--tokens-per-sample', '500',
])
def test_lstm_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir, 'lstm_lm', ['--add-bos-token'], run_validation=True,
)
eval_lm_main(data_dir)
generate_main(data_dir, [
'--task', 'language_modeling',
'--sample-break-mode', 'eos',
'--tokens-per-sample', '500',
])
def test_lstm_lm_residuals(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_lm_residuals') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir, 'lstm_lm', ['--add-bos-token', '--residuals'], run_validation=True,
)
eval_lm_main(data_dir)
generate_main(data_dir, [
'--task', 'language_modeling',
'--sample-break-mode', 'eos',
'--tokens-per-sample', '500',
])
class TestMaskedLanguageModel(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_legacy_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(data_dir, "masked_lm")
def test_roberta_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_masked_lm(data_dir, "roberta_base")
def test_roberta_sentence_prediction(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_head") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
preprocess_lm_data(os.path.join(data_dir, 'input0'))
preprocess_lm_data(os.path.join(data_dir, 'label'))
train_roberta_head(data_dir, "roberta_base", num_classes=num_classes)
def test_roberta_regression_single(self):
num_classes = 1
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_regression_single") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True)
preprocess_lm_data(os.path.join(data_dir, 'input0'))
train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target'])
def test_roberta_regression_multiple(self):
num_classes = 3
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_roberta_regression_multiple") as data_dir:
create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True)
preprocess_lm_data(os.path.join(data_dir, 'input0'))
train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target'])
def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(
data_dir,
arch="masked_lm",
extra_args=('--encoder-learned-pos',) if learned_pos_emb else ()
)
with tempfile.TemporaryDirectory(
"test_mlm_translation"
) as translation_dir:
create_dummy_data(translation_dir)
preprocess_translation_data(
translation_dir, extra_flags=["--joined-dictionary"]
)
# Train transformer with data_dir/checkpoint_last.pt
train_translation_model(
translation_dir,
arch="transformer_from_pretrained_xlm",
extra_flags=[
"--decoder-layers",
"1",
"--decoder-embed-dim",
"32",
"--decoder-attention-heads",
"1",
"--decoder-ffn-embed-dim",
"32",
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
"--pretrained-xlm-checkpoint",
"{}/checkpoint_last.pt".format(data_dir),
"--activation-fn",
"gelu",
"--max-source-positions",
"500",
"--max-target-positions",
"500",
] + (
["--encoder-learned-pos", "--decoder-learned-pos"]
if learned_pos_emb else []
) + (['--init-encoder-only'] if encoder_only else []),
task="translation_from_pretrained_xlm",
)
def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(True, False)
def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(False, False)
def test_pretrained_masked_lm_for_translation_encoder_only(self):
self._test_pretrained_masked_lm_for_translation(True, True)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
train_parser = options.get_training_parser()
# TODO: langs should be in and out right?
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"cross_lingual_lm",
data_dir,
"--arch",
arch,
# Optimizer args
"--optimizer",
"adam",
"--lr-scheduler",
"reduce_lr_on_plateau",
"--lr-shrink",
"0.5",
"--lr",
"0.0001",
"--min-lr",
"1e-09",
# dropout, attention args
"--dropout",
"0.1",
"--attention-dropout",
"0.1",
# MLM args
"--criterion",
"legacy_masked_lm_loss",
"--masked-lm-only",
"--monolingual-langs",
"in,out",
"--num-segment",
"5",
# Transformer args: use a small transformer model for fast training
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
# Other training args
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--dataset-impl",
"raw",
] + list(extra_args),
)
train.main(train_args)
class TestOptimizers(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_optimizers(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_optimizers') as data_dir:
# Use just a bit of data and tiny model to keep this test runtime reasonable
create_dummy_data(data_dir, num_examples=10, maxlen=5)
preprocess_translation_data(data_dir)
optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta']
last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt')
for optimizer in optimizers:
if os.path.exists(last_checkpoint):
os.remove(last_checkpoint)
train_translation_model(data_dir, 'lstm', [
'--required-batch-size-multiple', '1',
'--encoder-layers', '1',
'--encoder-hidden-size', '32',
'--decoder-layers', '1',
'--optimizer', optimizer,
])
generate_main(data_dir)
def create_dummy_roberta_head_data(data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False):
input_dir = 'input0'
def _create_dummy_data(filename):
random_data = torch.rand(num_examples * maxlen)
input_data = 97 + torch.floor(26 * random_data).int()
if regression:
output_data = torch.rand((num_examples, num_classes))
else:
output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int()
with open(os.path.join(data_dir, input_dir, filename+'.out'), 'w') as f_in:
label_filename = filename+'.label' if regression else filename+'.out'
with open(os.path.join(data_dir, 'label', label_filename), 'w') as f_out:
offset = 0
for i in range(num_examples):
# write example input
ex_len = random.randint(1, maxlen)
ex_str = ' '.join(map(chr, input_data[offset:offset+ex_len]))
print(ex_str, file=f_in)
# write example label
if regression:
class_str = ' '.join(map(str, output_data[i].numpy()))
print(class_str, file=f_out)
else:
class_str = 'class{}'.format(output_data[i])
print(class_str, file=f_out)
offset += ex_len
os.mkdir(os.path.join(data_dir, input_dir))
os.mkdir(os.path.join(data_dir, 'label'))
_create_dummy_data('train')
_create_dummy_data('valid')
_create_dummy_data('test')
def train_masked_lm(data_dir, arch, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
'--task', 'masked_lm',
data_dir,
'--arch', arch,
'--optimizer', 'adam',
'--lr', '0.0001',
'--criterion', 'masked_lm',
'--max-sentences', '500',
'--save-dir', data_dir,
'--max-epoch', '1',
'--no-progress-bar',
'--distributed-world-size', '1',
'--ddp-backend', 'no_c10d',
            '--num-workers', '0',
] + (extra_flags or []),
)
train.main(train_args)
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
'--task', 'sentence_prediction',
data_dir,
'--arch', arch,
'--num-classes', str(num_classes),
'--optimizer', 'adam',
'--lr', '0.0001',
'--criterion', 'sentence_prediction',
'--max-tokens', '500',
'--max-positions', '500',
'--max-sentences', '500',
'--save-dir', data_dir,
'--max-epoch', '1',
'--no-progress-bar',
'--distributed-world-size', '1',
'--ddp-backend', 'no_c10d',
            '--num-workers', '0',
] + (extra_flags or []),
)
train.main(train_args)
def train_language_model(data_dir, arch, extra_flags=None, run_validation=False):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
'--task', 'language_modeling',
data_dir,
'--arch', arch,
'--optimizer', 'adam',
'--lr', '0.0001',
'--criterion', 'adaptive_loss',
'--adaptive-softmax-cutoff', '5,10,15',
'--max-tokens', '500',
'--tokens-per-sample', '500',
'--save-dir', data_dir,
'--max-epoch', '1',
'--no-progress-bar',
'--distributed-world-size', '1',
'--ddp-backend', 'no_c10d',
] + (extra_flags or []),
)
train.main(train_args)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
'--task', 'language_modeling',
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--valid-subset', 'valid',
'--max-tokens', '500',
'--no-progress-bar',
]
)
validate.main(validate_args)
def eval_lm_main(data_dir):
eval_lm_parser = options.get_eval_lm_parser()
eval_lm_args = options.parse_args_and_arch(
eval_lm_parser,
[
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--no-progress-bar',
],
)
eval_lm.main(eval_lm_args)
def train_masked_language_model(data_dir, arch, extra_args=()):
train_parser = options.get_training_parser()
# TODO: langs should be in and out right?
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"cross_lingual_lm",
data_dir,
"--arch",
arch,
# Optimizer args
"--optimizer",
"adam",
"--lr-scheduler",
"reduce_lr_on_plateau",
"--lr-shrink",
"0.5",
"--lr",
"0.0001",
"--min-lr",
"1e-09",
# dropout, attention args
"--dropout",
"0.1",
"--attention-dropout",
"0.1",
# MLM args
"--criterion",
"masked_lm_loss",
"--masked-lm-only",
"--monolingual-langs",
"in,out",
"--num-segment",
"5",
# Transformer args: use a small transformer model for fast training
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
# Other training args
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--dataset-impl",
"raw",
] + list(extra_args),
)
train.main(train_args)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -7,813,910,436,524,079,000 | 41.002099 | 122 | 0.479364 | false |
jfhumann/servo | tests/wpt/update_css.py | 116 | 1081 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
here = os.path.split(__file__)[0]
def wpt_path(*args):
return os.path.join(here, *args)
# Imports
sys.path.append(wpt_path("harness"))
from wptrunner import wptcommandline
def update_tests(**kwargs):
from wptrunner import update
set_defaults(kwargs)
logger = update.setup_logging(kwargs, {"mach": sys.stdout})
rv = update.run_update(logger, **kwargs)
return 0 if rv is update.update.exit_clean else 1
def set_defaults(kwargs):
if kwargs["product"] is None:
kwargs["product"] = "servo"
if kwargs["config"] is None:
kwargs["config"] = wpt_path('config_css.ini')
wptcommandline.set_from_config(kwargs)
def main():
parser = wptcommandline.create_parser_update()
kwargs = vars(parser.parse_args())
return update_tests(**kwargs)
if __name__ == "__main__":
    sys.exit(main())
| mpl-2.0 | 1,332,775,066,866,119,400 | 24.139535 | 69 | 0.670675 | false |
bkeiren/cef | tools/check_style.py | 3 | 4018 | # Copyright (c) 2012 The Chromium Embedded Framework Authors.
# Portions copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os, re, string, sys
from file_util import *
import git_util as git
# script directory
script_dir = os.path.dirname(__file__)
# CEF root directory
cef_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
# Valid extensions for files we want to lint.
DEFAULT_LINT_WHITELIST_REGEX = r"(.*\.cpp|.*\.cc|.*\.h)"
DEFAULT_LINT_BLACKLIST_REGEX = r"$^"
try:
# depot_tools may already be in the import path.
import cpplint
import cpplint_chromium
except ImportError, e:
# Search the PATH environment variable to find the depot_tools folder.
  depot_tools = None
paths = os.environ.get('PATH').split(os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, 'cpplint_chromium.py')):
depot_tools = path
break
if depot_tools is None:
print >> sys.stderr, 'Error: could not find depot_tools in PATH.'
sys.exit(2)
# Add depot_tools to import path.
sys.path.append(depot_tools)
import cpplint
import cpplint_chromium
# The default implementation of FileInfo.RepositoryName looks for the top-most
# directory that contains a .git folder. This is a problem for CEF because the
# CEF root folder (which may have an arbitrary name) lives inside the Chromium
# src folder. Reimplement in a dumb but sane way.
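# Illustrative sketch (hypothetical path, not from the upstream file): for a
# checkout at /path/to/chromium/src/cef/tools/check_style.py the patched
# method walks up until it hits the "src" folder and returns the path
# relative to it, e.g. file_info.RepositoryName() -> "cef/tools/check_style.py"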
def patch_RepositoryName(self):
fullname = self.FullName()
project_dir = os.path.dirname(fullname)
if os.path.exists(fullname):
root_dir = project_dir
while os.path.basename(project_dir) != "src":
project_dir = os.path.dirname(project_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
components = fullname[len(prefix) + 1:].split('/')
return string.join(["cef"] + components[1:], '/')
return fullname
def check_style(args, white_list = None, black_list = None):
""" Execute cpplint with the specified arguments. """
# Apply patches.
cpplint.FileInfo.RepositoryName = patch_RepositoryName
# Process cpplint arguments.
filenames = cpplint.ParseArguments(args)
if not white_list:
white_list = DEFAULT_LINT_WHITELIST_REGEX
white_regex = re.compile(white_list)
if not black_list:
black_list = DEFAULT_LINT_BLACKLIST_REGEX
black_regex = re.compile(black_list)
extra_check_functions = [cpplint_chromium.CheckPointerDeclarationWhitespace]
for filename in filenames:
if white_regex.match(filename):
if black_regex.match(filename):
print "Ignoring file %s" % filename
else:
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
extra_check_functions)
else:
print "Skipping file %s" % filename
print "Total errors found: %d\n" % cpplint._cpplint_state.error_count
return 1
if __name__ == "__main__":
# Start with the default parameters.
args = [
# * Disable the 'build/class' test because it errors uselessly with C
# structure pointers and template declarations.
# * Disable the 'runtime/references' test because CEF allows non-const
# arguments passed by reference.
# * Disable the 'runtime/sizeof' test because it has a high number of
# false positives and adds marginal value.
'--filter=-build/class,-runtime/references,-runtime/sizeof',
]
# Add anything passed on the command-line.
args += sys.argv[1:]
# Pre-process the arguments before passing to the linter.
new_args = []
changed = []
for arg in args:
if arg == '--changed':
# Add any changed files.
changed = git.get_changed_files(cef_dir)
elif arg[:2] == '--' or not os.path.isdir(arg):
# Pass argument unchanged.
new_args.append(arg)
else:
# Add all files in the directory.
new_args += get_files(os.path.join(arg, '*'))
if len(changed) > 0:
new_args += changed
check_style(new_args)
| bsd-3-clause | 9,139,277,271,674,562,000 | 32.206612 | 78 | 0.688651 | false |
cbmoore/statsmodels | docs/source/plots/graphics_gofplots_qqplot.py | 38 | 1911 | # -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012
Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)
"""
from scipy import stats
from matplotlib import pyplot as plt
import statsmodels.api as sm
#example from docstring
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8  # x coordinate for text insert
fig = plt.figure()
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# example with the new ProbPlot class
import numpy as np
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
| bsd-3-clause | -6,187,090,041,235,930,000 | 27.102941 | 74 | 0.674516 | false |
boyuegame/kbengine | kbe/src/lib/python/Lib/distutils/tests/test_bdist_rpm.py | 71 | 4868 | """Tests for distutils.command.bdist_rpm."""
import unittest
import sys
import os
import tempfile
import shutil
from test.support import run_unittest
from distutils.core import Distribution
from distutils.command.bdist_rpm import bdist_rpm
from distutils.tests import support
from distutils.spawn import find_executable
from distutils import spawn
from distutils.errors import DistutilsExecError
SETUP_PY = """\
from distutils.core import setup
import foo
setup(name='foo', version='0.1', py_modules=['foo'],
url='xxx', author='xxx', author_email='xxx')
"""
class BuildRpmTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def setUp(self):
try:
sys.executable.encode("UTF-8")
except UnicodeEncodeError:
raise unittest.SkipTest("sys.executable is not encodable to UTF-8")
super(BuildRpmTestCase, self).setUp()
self.old_location = os.getcwd()
self.old_sys_argv = sys.argv, sys.argv[:]
def tearDown(self):
os.chdir(self.old_location)
sys.argv = self.old_sys_argv[0]
sys.argv[:] = self.old_sys_argv[1]
super(BuildRpmTestCase, self).tearDown()
    # XXX I have not yet been able to make this test work without
    # spurious stdout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious stdout/stderr output under Mac OS X')
@unittest.skipIf(find_executable('rpm') is None,
'the rpm command is not found')
@unittest.skipIf(find_executable('rpmbuild') is None,
'the rpmbuild command is not found')
def test_quiet(self):
# let's create a package
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
self.write_file((pkg_dir, 'foo.py'), '#')
self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
self.write_file((pkg_dir, 'README'), '')
dist = Distribution({'name': 'foo', 'version': '0.1',
'py_modules': ['foo'],
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'})
dist.script_name = 'setup.py'
os.chdir(pkg_dir)
sys.argv = ['setup.py']
cmd = bdist_rpm(dist)
cmd.fix_python = True
# running in quiet mode
cmd.quiet = 1
cmd.ensure_finalized()
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
# bug #2945: upload ignores bdist_rpm files
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
    # XXX I have not yet been able to make this test work without
    # spurious stdout/stderr output under Mac OS X
    @unittest.skipUnless(sys.platform.startswith('linux'),
                         'spurious stdout/stderr output under Mac OS X')
# http://bugs.python.org/issue1533164
@unittest.skipIf(find_executable('rpm') is None,
'the rpm command is not found')
@unittest.skipIf(find_executable('rpmbuild') is None,
'the rpmbuild command is not found')
def test_no_optimize_flag(self):
        # let's create a package that breaks bdist_rpm
tmp_dir = self.mkdtemp()
pkg_dir = os.path.join(tmp_dir, 'foo')
os.mkdir(pkg_dir)
self.write_file((pkg_dir, 'setup.py'), SETUP_PY)
self.write_file((pkg_dir, 'foo.py'), '#')
self.write_file((pkg_dir, 'MANIFEST.in'), 'include foo.py')
self.write_file((pkg_dir, 'README'), '')
dist = Distribution({'name': 'foo', 'version': '0.1',
'py_modules': ['foo'],
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'})
dist.script_name = 'setup.py'
os.chdir(pkg_dir)
sys.argv = ['setup.py']
cmd = bdist_rpm(dist)
cmd.fix_python = True
cmd.quiet = 1
cmd.ensure_finalized()
cmd.run()
dist_created = os.listdir(os.path.join(pkg_dir, 'dist'))
self.assertIn('foo-0.1-1.noarch.rpm', dist_created)
# bug #2945: upload ignores bdist_rpm files
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.src.rpm'), dist.dist_files)
self.assertIn(('bdist_rpm', 'any', 'dist/foo-0.1-1.noarch.rpm'), dist.dist_files)
os.remove(os.path.join(pkg_dir, 'dist', 'foo-0.1-1.noarch.rpm'))
def test_suite():
return unittest.makeSuite(BuildRpmTestCase)
if __name__ == '__main__':
run_unittest(test_suite())
| lgpl-3.0 | -7,839,338,785,633,518,000 | 35.328358 | 89 | 0.582375 | false |
2013Commons/hue | desktop/core/ext-py/Django-1.4.5/django/db/models/sql/query.py | 53 | 85934 | """
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_unicode
from django.utils.tree import Node
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.db.models.expressions import ExpressionNode
from django.db.models.fields import FieldDoesNotExist
from django.db.models.query_utils import InvalidQuery
from django.db.models.sql import aggregates as base_aggregates_module
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import EmptyResultSet, Empty, MultiJoin
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import (WhereNode, Constraint, EverythingNode,
ExtraWhere, AND, OR)
from django.core.exceptions import FieldError
__all__ = ['Query', 'RawQuery']
class RawQuery(object):
"""
A single raw SQL query
"""
def __init__(self, sql, using, params=None):
self.params = params or ()
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.aggregate_select = {}
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.table_name_converter
return [converter(column_meta[0])
for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<RawQuery: %r>" % (self.sql % tuple(self.params))
def _execute_query(self):
self.cursor = connections[self.using].cursor()
self.cursor.execute(self.sql, self.params)
class Query(object):
"""
A single SQL query.
"""
# SQL join types. These are part of the class because their string forms
# vary from database to database and can be customised by a subclass.
INNER = 'INNER JOIN'
LOUTER = 'LEFT OUTER JOIN'
alias_prefix = 'T'
query_terms = QUERY_TERMS
aggregates_module = base_aggregates_module
compiler = 'SQLCompiler'
def __init__(self, model, where=WhereNode):
self.model = model
self.alias_refcount = SortedDict()
self.alias_map = {} # Maps alias to join information
self.table_map = {} # Maps table names to list of aliases.
self.join_map = {}
self.rev_join_map = {} # Reverse of join_map.
self.quote_cache = {}
self.default_cols = True
self.default_ordering = True
self.standard_ordering = True
self.ordering_aliases = []
self.select_fields = []
self.related_select_fields = []
self.dupe_avoidance = {}
self.used_aliases = set()
self.filter_is_sticky = False
self.included_inherited_models = {}
# SQL-related attributes
self.select = []
self.tables = [] # Aliases in the order they are created.
self.where = where()
self.where_class = where
self.group_by = None
self.having = where()
self.order_by = []
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.distinct = False
self.distinct_fields = []
self.select_for_update = False
self.select_for_update_nowait = False
self.select_related = False
self.related_select_cols = []
# SQL aggregate-related attributes
self.aggregates = SortedDict() # Maps alias -> SQL aggregate function
self.aggregate_select_mask = None
self._aggregate_select_cache = None
# Arbitrary maximum limit for select_related. Prevents infinite
# recursion. Can be changed by the depth parameter to select_related().
self.max_depth = 5
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = SortedDict() # Maps col_alias -> (col_sql, params).
self.extra_select_mask = None
self._extra_select_cache = None
self.extra_tables = ()
self.extra_order_by = ()
# A tuple that is a set of model field names and either True, if these
# are the fields to defer, or False if these are the only fields to
# load.
self.deferred_loading = (set(), True)
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
        substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
def __deepcopy__(self, memo):
result = self.clone(memo=memo)
memo[id(self)] = result
return result
def __getstate__(self):
"""
Pickling support.
"""
obj_dict = self.__dict__.copy()
obj_dict['related_select_fields'] = []
obj_dict['related_select_cols'] = []
# Fields can't be pickled, so if a field list has been
# specified, we pickle the list of field names instead.
# None is also a possible value; that can pass as-is
obj_dict['select_fields'] = [
f is not None and f.name or None
for f in obj_dict['select_fields']
]
return obj_dict
def __setstate__(self, obj_dict):
"""
Unpickling support.
"""
# Rebuild list of field instances
opts = obj_dict['model']._meta
obj_dict['select_fields'] = [
name is not None and opts.get_field(name) or None
for name in obj_dict['select_fields']
]
self.__dict__.update(obj_dict)
def prepare(self):
return self
def get_compiler(self, using=None, connection=None):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
# Check that the compiler will be able to execute the query
for alias, aggregate in self.aggregate_select.items():
connection.ops.check_aggregate_support(aggregate)
return connection.ops.compiler(self.compiler)(self, connection, using)
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
return self.model._meta
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
obj.__class__ = klass or self.__class__
obj.model = self.model
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.table_map = self.table_map.copy()
obj.join_map = self.join_map.copy()
obj.rev_join_map = self.rev_join_map.copy()
obj.quote_cache = {}
obj.default_cols = self.default_cols
obj.default_ordering = self.default_ordering
obj.standard_ordering = self.standard_ordering
obj.included_inherited_models = self.included_inherited_models.copy()
obj.ordering_aliases = []
obj.select_fields = self.select_fields[:]
obj.related_select_fields = self.related_select_fields[:]
obj.dupe_avoidance = self.dupe_avoidance.copy()
obj.select = self.select[:]
obj.tables = self.tables[:]
obj.where = copy.deepcopy(self.where, memo=memo)
obj.where_class = self.where_class
if self.group_by is None:
obj.group_by = None
else:
obj.group_by = self.group_by[:]
obj.having = copy.deepcopy(self.having, memo=memo)
obj.order_by = self.order_by[:]
obj.low_mark, obj.high_mark = self.low_mark, self.high_mark
obj.distinct = self.distinct
obj.distinct_fields = self.distinct_fields[:]
obj.select_for_update = self.select_for_update
obj.select_for_update_nowait = self.select_for_update_nowait
obj.select_related = self.select_related
obj.related_select_cols = []
obj.aggregates = copy.deepcopy(self.aggregates, memo=memo)
if self.aggregate_select_mask is None:
obj.aggregate_select_mask = None
else:
obj.aggregate_select_mask = self.aggregate_select_mask.copy()
# _aggregate_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both aggregates and
# _aggregate_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._aggregate_select_cache = None
obj.max_depth = self.max_depth
obj.extra = self.extra.copy()
if self.extra_select_mask is None:
obj.extra_select_mask = None
else:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is None:
obj._extra_select_cache = None
else:
obj._extra_select_cache = self._extra_select_cache.copy()
obj.extra_tables = self.extra_tables
obj.extra_order_by = self.extra_order_by
obj.deferred_loading = copy.deepcopy(self.deferred_loading, memo=memo)
if self.filter_is_sticky and self.used_aliases:
obj.used_aliases = self.used_aliases.copy()
else:
obj.used_aliases = set()
obj.filter_is_sticky = False
obj.__dict__.update(kwargs)
if hasattr(obj, '_setup_query'):
obj._setup_query()
return obj
def convert_values(self, value, field, connection):
"""Convert the database-returned value into a type that is consistent
across database backends.
By default, this defers to the underlying backend operations, but
it can be overridden by Query classes for specific backends.
"""
return connection.ops.convert_values(value, field)
def resolve_aggregate(self, value, aggregate, connection):
"""Resolve the value of aggregates returned by the database to
consistent (and reasonable) types.
This is required because of the predisposition of certain backends
to return Decimal and long types when they are not needed.
"""
if value is None:
if aggregate.is_ordinal:
return 0
# Return None as-is
return value
elif aggregate.is_ordinal:
# Any ordinal aggregate (e.g., count) returns an int
return int(value)
elif aggregate.is_computed:
# Any computed aggregate (e.g., avg) returns a float
return float(value)
else:
# Return value depends on the type of the field being processed.
return self.convert_values(value, aggregate.field, connection)
def get_aggregation(self, using):
"""
Returns the dictionary with the values of the existing aggregations.
"""
if not self.aggregate_select:
return {}
# If there is a group by clause, aggregating does not add useful
# information but retrieves only the first row. Aggregate
# over the subquery instead.
if self.group_by is not None:
from django.db.models.sql.subqueries import AggregateQuery
query = AggregateQuery(self.model)
obj = self.clone()
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
for alias, aggregate in self.aggregate_select.items():
if aggregate.is_summary:
query.aggregate_select[alias] = aggregate
del obj.aggregate_select[alias]
try:
query.add_subquery(obj, using)
except EmptyResultSet:
return dict(
(alias, None)
for alias in query.aggregate_select
)
else:
query = self
self.select = []
self.default_cols = False
self.extra = {}
self.remove_inherited_models()
query.clear_ordering(True)
query.clear_limits()
query.select_for_update = False
query.select_related = False
query.related_select_cols = []
query.related_select_fields = []
result = query.get_compiler(using).execute_sql(SINGLE)
if result is None:
result = [None for q in query.aggregate_select.items()]
return dict([
(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
for (alias, aggregate), val
in zip(query.aggregate_select.items(), result)
])
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
if len(self.select) > 1 or self.aggregate_select or (self.distinct and self.distinct_fields):
# If a select clause exists, then the query has already started to
# specify the columns that are to be returned.
# In this case, we need to use a subquery to evaluate the count.
from django.db.models.sql.subqueries import AggregateQuery
subquery = obj
subquery.clear_ordering(True)
subquery.clear_limits()
obj = AggregateQuery(obj.model)
try:
obj.add_subquery(subquery, using=using)
except EmptyResultSet:
                # add_subquery evaluates the query; if it raises
                # EmptyResultSet then there can be no results, and
                # therefore the count is obviously 0
return 0
obj.add_count_column()
number = obj.get_aggregation(using=using)[None]
# Apply offset and limit constraints manually, since using LIMIT/OFFSET
# in SQL (in variants that provide them) doesn't change the COUNT
# output.
number = max(0, number - self.low_mark)
if self.high_mark is not None:
number = min(number, self.high_mark - self.low_mark)
return number
def has_results(self, using):
q = self.clone()
q.add_extra({'a': 1}, None, None, None, None, None)
q.select = []
q.select_fields = []
q.default_cols = False
q.select_related = False
q.set_extra_mask(('a',))
q.set_aggregate_mask(())
q.clear_ordering(True)
q.set_limits(high=1)
compiler = q.get_compiler(using=using)
return bool(compiler.execute_sql(SINGLE))
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
assert self.model == rhs.model, \
"Cannot combine queries on two different base models."
assert self.can_filter(), \
"Cannot combine queries once a slice has been taken."
assert self.distinct == rhs.distinct, \
"Cannot combine a unique query with a non-unique query."
assert self.distinct_fields == rhs.distinct_fields, \
"Cannot combine queries with different distinct fields."
self.remove_inherited_models()
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
used = set()
conjunction = (connector == AND)
first = True
for alias in rhs.tables:
if not rhs.alias_refcount[alias]:
# An unused alias.
continue
promote = (rhs.alias_map[alias][JOIN_TYPE] == self.LOUTER)
lhs, table, lhs_col, col = rhs.rev_join_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
lhs = change_map.get(lhs, lhs)
new_alias = self.join((lhs, table, lhs_col, col),
(conjunction and not first), used, promote, not conjunction)
used.add(new_alias)
change_map[alias] = new_alias
first = False
# So that we don't exclude valid results in an "or" query combination,
# all joins exclusive to either the lhs or the rhs must be converted
# to an outer join.
if not conjunction:
l_tables = set(self.tables)
r_tables = set(rhs.tables)
# Update r_tables aliases.
for alias in change_map:
if alias in r_tables:
# r_tables may contain entries that have a refcount of 0
# if the query has references to a table that can be
# trimmed because only the foreign key is used.
# We only need to fix the aliases for the tables that
# actually have aliases.
if rhs.alias_refcount[alias]:
r_tables.remove(alias)
r_tables.add(change_map[alias])
# Find aliases that are exclusive to rhs or lhs.
# These are promoted to outer joins.
outer_tables = (l_tables | r_tables) - (l_tables & r_tables)
for alias in outer_tables:
# Again, some of the tables won't have aliases due to
# the trimming of unnecessary tables.
if self.alias_refcount.get(alias) or rhs.alias_refcount.get(alias):
self.promote_alias(alias, True)
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
if rhs.where:
w = copy.deepcopy(rhs.where)
w.relabel_aliases(change_map)
if not self.where:
# Since 'self' matches everything, add an explicit "include
# everything" where-constraint so that connections between the
# where clauses won't exclude valid results.
self.where.add(EverythingNode(), AND)
elif self.where:
# rhs has an empty where clause.
w = self.where_class()
w.add(EverythingNode(), AND)
else:
w = self.where_class()
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
self.select = []
for col in rhs.select:
if isinstance(col, (list, tuple)):
self.select.append((change_map.get(col[0], col[0]), col[1]))
else:
item = copy.deepcopy(col)
item.relabel_aliases(change_map)
self.select.append(item)
self.select_fields = rhs.select_fields[:]
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError("When merging querysets using 'or', you "
"cannot have extra(select=...) on both sides.")
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by and rhs.order_by[:] or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialised on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
The "target" parameter is the instance that is populated (in place).
The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
"target", and the model and list of fields being added for that model.
"""
field_names, defer = self.deferred_loading
if not field_names:
return
orig_opts = self.model._meta
seen = {}
must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
for field_name in field_names:
parts = field_name.split(LOOKUP_SEP)
cur_model = self.model
opts = orig_opts
for name in parts[:-1]:
old_model = cur_model
source = opts.get_field_by_name(name)[0]
cur_model = source.rel.to
opts = cur_model._meta
# Even if we're "just passing through" this model, we must add
# both the current model's pk and the related reference field
# to the things we select.
must_include[old_model].add(source)
add_to_dict(must_include, cur_model, opts.pk)
field, model, _, _ = opts.get_field_by_name(parts[-1])
if model is None:
model = cur_model
add_to_dict(seen, model, field)
if defer:
# We need to load all fields for each model, except those that
# appear in "seen" (for all models that appear in "seen"). The only
# slight complexity here is handling fields that exist on parent
# models.
workset = {}
for model, values in seen.iteritems():
for field, m in model._meta.get_fields_with_model():
if field in values:
continue
add_to_dict(workset, m or model, field)
for model, values in must_include.iteritems():
# If we haven't included a model in workset, we don't add the
# corresponding must_include fields for that model, since an
# empty set means "include all fields". That's why there's no
# "else" branch here.
if model in workset:
workset[model].update(values)
for model, values in workset.iteritems():
callback(target, model, values)
else:
for model, values in must_include.iteritems():
if model in seen:
seen[model].update(values)
else:
# As we've passed through this model, but not explicitly
# included any fields, we have to make sure it's mentioned
# so that only the "must include" fields are pulled in.
seen[model] = values
# Now ensure that every model in the inheritance chain is mentioned
# in the parent list. Again, it must be mentioned to ensure that
# only "must include" fields are pulled in.
for model in orig_opts.get_parent_list():
if model not in seen:
seen[model] = set()
for model, values in seen.iteritems():
callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
"""
Callback used by deferred_to_columns(). The "target" parameter should
be a set instance.
"""
table = model._meta.db_table
if table not in target:
target[table] = set()
for field in fields:
target[table].add(field.column)
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
current = self.table_map.get(table_name)
if not create and current:
alias = current[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if current:
alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
current.append(alias)
else:
            # The first occurrence of a table uses the table name directly.
alias = table_name
self.table_map[alias] = [alias]
self.alias_refcount[alias] = 1
self.tables.append(alias)
return alias, True
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
self.alias_refcount[alias] -= amount
def promote_alias(self, alias, unconditional=False):
"""
Promotes the join type of an alias to an outer join if it's possible
for the join to contain NULL values on the left. If 'unconditional' is
False, the join is only promoted if it is nullable, otherwise it is
always promoted.
Returns True if the join was promoted by this call.
"""
if ((unconditional or self.alias_map[alias][NULLABLE]) and
self.alias_map[alias][JOIN_TYPE] != self.LOUTER):
data = list(self.alias_map[alias])
data[JOIN_TYPE] = self.LOUTER
self.alias_map[alias] = tuple(data)
return True
return False
def promote_alias_chain(self, chain, must_promote=False):
"""
Walks along a chain of aliases, promoting the first nullable join and
any joins following that. If 'must_promote' is True, all the aliases in
the chain are promoted.
"""
for alias in chain:
if self.promote_alias(alias, must_promote):
must_promote = True
def reset_refcounts(self, to_counts):
"""
This method will reset reference counts for aliases so that they match
the value passed in :param to_counts:.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def promote_unused_aliases(self, initial_refcounts, used_aliases):
"""
Given a "before" copy of the alias_refcounts dictionary (as
'initial_refcounts') and a collection of aliases that may have been
changed or created, works out which aliases have been created since
then and which ones haven't been used and promotes all of those
aliases, plus any children of theirs in the alias tree, to outer joins.
"""
# FIXME: There's some (a lot of!) overlap with the similar OR promotion
# in add_filter(). It's not quite identical, but is very similar. So
# pulling out the common bits is something for later.
considered = {}
for alias in self.tables:
if alias not in used_aliases:
continue
if (alias not in initial_refcounts or
self.alias_refcount[alias] == initial_refcounts[alias]):
parent = self.alias_map[alias][LHS_ALIAS]
must_promote = considered.get(parent, False)
promoted = self.promote_alias(alias, must_promote)
considered[alias] = must_promote or promoted
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
assert set(change_map.keys()).intersection(set(change_map.values())) == set()
# 1. Update references in "select" (normal columns plus aliases),
# "group by", "where" and "having".
self.where.relabel_aliases(change_map)
self.having.relabel_aliases(change_map)
for columns in [self.select, self.group_by or []]:
for pos, col in enumerate(columns):
if isinstance(col, (list, tuple)):
old_alias = col[0]
columns[pos] = (change_map.get(old_alias, old_alias), col[1])
else:
col.relabel_aliases(change_map)
for mapping in [self.aggregates]:
for key, col in mapping.items():
if isinstance(col, (list, tuple)):
old_alias = col[0]
mapping[key] = (change_map.get(old_alias, old_alias), col[1])
else:
col.relabel_aliases(change_map)
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.iteritems():
alias_data = list(self.alias_map[old_alias])
alias_data[RHS_ALIAS] = new_alias
t = self.rev_join_map[old_alias]
data = list(self.join_map[t])
data[data.index(old_alias)] = new_alias
self.join_map[t] = tuple(data)
self.rev_join_map[new_alias] = t
del self.rev_join_map[old_alias]
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
self.alias_map[new_alias] = tuple(alias_data)
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data[TABLE_NAME]]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
for pos, alias in enumerate(self.tables):
if alias == old_alias:
self.tables[pos] = new_alias
break
for key, alias in self.included_inherited_models.items():
if alias in change_map:
self.included_inherited_models[key] = change_map[alias]
# 3. Update any joins that refer to the old alias.
for alias, data in self.alias_map.iteritems():
lhs = data[LHS_ALIAS]
if lhs in change_map:
data = list(data)
data[LHS_ALIAS] = change_map[lhs]
self.alias_map[alias] = tuple(data)
def bump_prefix(self, exceptions=()):
"""
Changes the alias prefix to the next letter in the alphabet and
relabels all the aliases. Even tables that previously had no alias will
get an alias after this call (it's mostly used for nested queries and
the outer query will already be using the non-aliased table name).
Subclasses who create their own prefix should override this method to
produce a similar result (a new prefix and relabelled aliases).
The 'exceptions' parameter is a container that holds alias names which
should not be changed.
"""
current = ord(self.alias_prefix)
assert current < ord('Z')
prefix = chr(current + 1)
self.alias_prefix = prefix
change_map = SortedDict()
for pos, alias in enumerate(self.tables):
if alias in exceptions:
continue
new_alias = '%s%d' % (prefix, pos)
change_map[alias] = new_alias
self.tables[pos] = new_alias
self.change_aliases(change_map)
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
count.
"""
if self.tables:
alias = self.tables[0]
self.ref_alias(alias)
else:
alias = self.join((None, self.model._meta.db_table, None, None))
return alias
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.itervalues() if count])
def join(self, connection, always_create=False, exclusions=(),
promote=False, outer_if_first=False, nullable=False, reuse=None):
"""
Returns an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, lhs_col, col) where 'lhs' is either an existing
        table alias or a table name. The join corresponds to the SQL equivalent
of::
lhs.lhs_col = table.col
If 'always_create' is True and 'reuse' is None, a new alias is always
created, regardless of whether one already exists or not. If
'always_create' is True and 'reuse' is a set, an alias in 'reuse' that
matches the connection will be returned, if possible. If
'always_create' is False, the first existing alias that matches the
'connection' is returned, if any. Otherwise a new join is created.
If 'exclusions' is specified, it is something satisfying the container
protocol ("foo in exclusions" must work) and specifies a list of
aliases that should not be returned, even if they satisfy the join.
If 'promote' is True, the join type for the alias will be LOUTER (if
the alias previously existed, the join type will be promoted from INNER
to LOUTER, if necessary).
If 'outer_if_first' is True and a new join is created, it will have the
LOUTER join type. This is used when joining certain types of querysets
and Q-objects together.
If 'nullable' is True, the join can potentially involve NULL values and
is a candidate for promotion (to "left outer") when combining querysets.
"""
lhs, table, lhs_col, col = connection
if lhs in self.alias_map:
lhs_table = self.alias_map[lhs][TABLE_NAME]
else:
lhs_table = lhs
if reuse and always_create and table in self.table_map:
            # Convert the 'reuse' case to "exclude everything but the
            # reusable set, minus exclusions, for this table".
exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
always_create = False
t_ident = (lhs_table, table, lhs_col, col)
if not always_create:
for alias in self.join_map.get(t_ident, ()):
if alias not in exclusions:
if lhs_table and not self.alias_refcount[self.alias_map[alias][LHS_ALIAS]]:
# The LHS of this join tuple is no longer part of the
# query, so skip this possibility.
continue
if self.alias_map[alias][LHS_ALIAS] != lhs:
continue
self.ref_alias(alias)
if promote:
self.promote_alias(alias)
return alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(table, True)
if not lhs:
# Not all tables need to be joined to anything. No join type
# means the later columns are ignored.
join_type = None
elif promote or outer_if_first:
join_type = self.LOUTER
else:
join_type = self.INNER
join = (table, alias, join_type, lhs, lhs_col, col, nullable)
self.alias_map[alias] = join
if t_ident in self.join_map:
self.join_map[t_ident] += (alias,)
else:
self.join_map[t_ident] = (alias,)
self.rev_join_map[alias] = t_ident
return alias
def setup_inherited_models(self):
"""
If the model that is the basis for this QuerySet inherits other models,
we need to ensure that those other models have their tables included in
the query.
We do this as a separate step so that subclasses know which
tables are going to be active in the query, without needing to compute
all the select columns (this method is called from pre_sql_setup(),
whereas column determination is a later part, and side-effect, of
as_sql()).
"""
opts = self.model._meta
root_alias = self.tables[0]
seen = {None: root_alias}
# Skip all proxy to the root proxied model
proxied_model = opts.concrete_model
for field, model in opts.get_fields_with_model():
if model not in seen:
if model is proxied_model:
seen[model] = root_alias
else:
link_field = opts.get_ancestor_link(model)
seen[model] = self.join((root_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
self.included_inherited_models = seen
def remove_inherited_models(self):
"""
Undoes the effects of setup_inherited_models(). Should be called
whenever select columns (self.select) are set explicitly.
"""
for key, alias in self.included_inherited_models.items():
if key:
self.unref_alias(alias)
self.included_inherited_models = {}
def need_force_having(self, q_object):
"""
Returns whether or not all elements of this q_object need to be put
together in the HAVING clause.
"""
for child in q_object.children:
if isinstance(child, Node):
if self.need_force_having(child):
return True
else:
if child[0].split(LOOKUP_SEP)[0] in self.aggregates:
return True
return False
def add_aggregate(self, aggregate, model, alias, is_summary):
"""
Adds a single aggregate expression to the Query
"""
opts = model._meta
field_list = aggregate.lookup.split(LOOKUP_SEP)
if len(field_list) == 1 and aggregate.lookup in self.aggregates:
# Aggregate is over an annotation
field_name = field_list[0]
col = field_name
source = self.aggregates[field_name]
if not is_summary:
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (
aggregate.name, field_name, field_name))
elif ((len(field_list) > 1) or
(field_list[0] not in [i.name for i in opts.fields]) or
self.group_by is None or
not is_summary):
# If:
# - the field descriptor has more than one part (foo__bar), or
# - the field descriptor is referencing an m2m/m2o field, or
# - this is a reference to a model field (possibly inherited), or
# - this is an annotation over a model field
# then we need to explore the joins that are required.
field, source, opts, join_list, last, _ = self.setup_joins(
field_list, opts, self.get_initial_alias(), False)
# Process the join chain to see if it can be trimmed
col, _, join_list = self.trim_joins(source, join_list, last, False)
# If the aggregate references a model or field that requires a join,
# those joins must be LEFT OUTER - empty join rows must be returned
# in order for zeros to be returned for those aggregates.
for column_alias in join_list:
self.promote_alias(column_alias, unconditional=True)
col = (join_list[-1], col)
else:
# The simplest cases. No joins required -
# just reference the provided column alias.
field_name = field_list[0]
source = opts.get_field(field_name)
col = field_name
# Add the aggregate to the query
aggregate.add_to_query(self, alias, col=col, source=source, is_summary=is_summary)
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False,
can_reuse=None, process_extras=True, force_having=False):
"""
Add a single filter to the query. The 'filter_expr' is a pair:
(filter_string, value). E.g. ('name__contains', 'fred')
If 'negate' is True, this is an exclude() filter. It's important to
note that this method does not negate anything in the where-clause
object when inserting the filter constraints. This is because negated
filters often require multiple calls to add_filter() and the negation
        should only happen once. So the caller is responsible for this (the
        caller will normally be add_q(), for example).
If 'trim' is True, we automatically trim the final join group (used
internally when constructing nested queries).
If 'can_reuse' is a set, we are processing a component of a
multi-component filter (e.g. filter(Q1, Q2)). In this case, 'can_reuse'
will be a set of table aliases that can be reused in this filter, even
if we would otherwise force the creation of new aliases for a join
(needed for nested Q-filters). The set is updated by this method.
If 'process_extras' is set, any extra filters returned from the table
joining process will be processed. This parameter is set to False
during the processing of extra filters to avoid infinite recursion.
"""
arg, value = filter_expr
parts = arg.split(LOOKUP_SEP)
if not parts:
raise FieldError("Cannot parse keyword query %r" % arg)
# Work out the lookup type and remove it from the end of 'parts',
# if necessary.
lookup_type = 'exact' # Default lookup type
num_parts = len(parts)
if (len(parts) > 1 and parts[-1] in self.query_terms
and arg not in self.aggregates):
# Traverse the lookup query to distinguish related fields from
# lookup types.
lookup_model = self.model
for counter, field_name in enumerate(parts):
try:
lookup_field = lookup_model._meta.get_field(field_name)
except FieldDoesNotExist:
# Not a field. Bail out.
lookup_type = parts.pop()
break
# Unless we're at the end of the list of lookups, let's attempt
# to continue traversing relations.
if (counter + 1) < num_parts:
try:
lookup_model = lookup_field.rel.to
except AttributeError:
# Not a related field. Bail out.
lookup_type = parts.pop()
break
# By default, this is a WHERE clause. If an aggregate is referenced
# in the value, the filter will be promoted to a HAVING
having_clause = False
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value.
if value is None:
if lookup_type != 'exact':
raise ValueError("Cannot use None as a query value")
lookup_type = 'isnull'
value = True
elif callable(value):
value = value()
elif isinstance(value, ExpressionNode):
# If value is a query expression, evaluate it
value = SQLEvaluator(value, self)
having_clause = value.contains_aggregate
for alias, aggregate in self.aggregates.items():
if alias in (parts[0], LOOKUP_SEP.join(parts)):
entry = self.where_class()
entry.add((aggregate, lookup_type, value), AND)
if negate:
entry.negate()
self.having.add(entry, connector)
return
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = trim or not negate
try:
field, target, opts, join_list, last, extra_filters = self.setup_joins(
parts, opts, alias, True, allow_many, allow_explicit_fk=True,
can_reuse=can_reuse, negate=negate,
process_extras=process_extras)
except MultiJoin, e:
self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
can_reuse)
return
table_promote = False
join_promote = False
if (lookup_type == 'isnull' and value is True and not negate and
len(join_list) > 1):
# If the comparison is against NULL, we may need to use some left
# outer joins when creating the join chain. This is only done when
# needed, as it's less efficient at the database level.
self.promote_alias_chain(join_list)
join_promote = True
# Process the join list to see if we can remove any inner joins from
# the far end (fewer tables in a query is better).
nonnull_comparison = (lookup_type == 'isnull' and value is False)
col, alias, join_list = self.trim_joins(target, join_list, last, trim,
nonnull_comparison)
if connector == OR:
# Some joins may need to be promoted when adding a new filter to a
# disjunction. We walk the list of new joins and where it diverges
# from any previous joins (ref count is 1 in the table list), we
# make the new additions (and any existing ones not used in the new
# join list) an outer join.
join_it = iter(join_list)
table_it = iter(self.tables)
join_it.next(), table_it.next()
unconditional = False
for join in join_it:
table = table_it.next()
# Once we hit an outer join, all subsequent joins must
# also be promoted, regardless of whether they have been
# promoted as a result of this pass through the tables.
unconditional = (unconditional or
self.alias_map[join][JOIN_TYPE] == self.LOUTER)
if join == table and self.alias_refcount[join] > 1:
# We have more than one reference to this join table.
# This means that we are dealing with two different query
# subtrees, so we don't need to do any join promotion.
continue
join_promote = join_promote or self.promote_alias(join, unconditional)
if table != join:
table_promote = self.promote_alias(table)
# We only get here if we have found a table that exists
# in the join list, but isn't on the original tables list.
# This means we've reached the point where we only have
# new tables, so we can break out of this promotion loop.
break
self.promote_alias_chain(join_it, join_promote)
self.promote_alias_chain(table_it, table_promote or join_promote)
if having_clause or force_having:
if (alias, col) not in self.group_by:
self.group_by.append((alias, col))
self.having.add((Constraint(alias, col, field), lookup_type, value),
connector)
else:
self.where.add((Constraint(alias, col, field), lookup_type, value),
connector)
if negate:
self.promote_alias_chain(join_list)
if lookup_type != 'isnull':
if len(join_list) > 1:
for alias in join_list:
if self.alias_map[alias][JOIN_TYPE] == self.LOUTER:
j_col = self.alias_map[alias][RHS_JOIN_COL]
entry = self.where_class()
entry.add(
(Constraint(alias, j_col, None), 'isnull', True),
AND
)
entry.negate()
self.where.add(entry, AND)
break
if not (lookup_type == 'in'
and not hasattr(value, 'as_sql')
and not hasattr(value, '_as_sql')
and not value) and field.null:
# Leaky abstraction artifact: We have to specifically
# exclude the "foo__in=[]" case from this handling, because
# it's short-circuited in the Where class.
# We also need to handle the case where a subquery is provided
self.where.add((Constraint(alias, col, None), 'isnull', False), AND)
if can_reuse is not None:
can_reuse.update(join_list)
if process_extras:
for filter in extra_filters:
self.add_filter(filter, negate=negate, can_reuse=can_reuse,
process_extras=False)
def add_q(self, q_object, used_aliases=None, force_having=False):
"""
Adds a Q-object to the current filter.
Can also be used to add anything that has an 'add_to_query()' method.
"""
if used_aliases is None:
used_aliases = self.used_aliases
if hasattr(q_object, 'add_to_query'):
# Complex custom objects are responsible for adding themselves.
q_object.add_to_query(self, used_aliases)
else:
if self.where and q_object.connector != AND and len(q_object) > 1:
self.where.start_subtree(AND)
subtree = True
else:
subtree = False
connector = AND
if q_object.connector == OR and not force_having:
force_having = self.need_force_having(q_object)
for child in q_object.children:
if connector == OR:
refcounts_before = self.alias_refcount.copy()
if force_having:
self.having.start_subtree(connector)
else:
self.where.start_subtree(connector)
if isinstance(child, Node):
self.add_q(child, used_aliases, force_having=force_having)
else:
self.add_filter(child, connector, q_object.negated,
can_reuse=used_aliases, force_having=force_having)
if force_having:
self.having.end_subtree()
else:
self.where.end_subtree()
if connector == OR:
# Aliases that were newly added or not used at all need to
# be promoted to outer joins if they are nullable relations.
# (they shouldn't turn the whole conditional into the empty
# set just because they don't match anything).
self.promote_unused_aliases(refcounts_before, used_aliases)
connector = q_object.connector
if q_object.negated:
self.where.negate()
if subtree:
self.where.end_subtree()
if self.filter_is_sticky:
self.used_aliases = used_aliases
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True,
allow_explicit_fk=False, can_reuse=None, negate=False,
process_extras=True):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are joining to), 'alias' is the alias for the
table we are joining to. If dupe_multis is True, any many-to-many or
many-to-one joins will always create a new alias (necessary for
disjunctive filters). If can_reuse is not None, it's a list of aliases
that can be reused in these joins (nothing else can be reused in this
case). Finally, 'negate' is used in the same sense as for add_filter()
-- it indicates an exclude() filter, or something similar. It is only
        passed in here so that it can be passed to a field's extra_filters() for
customized behavior.
Returns the final field involved in the join, the target database
column (used for any 'where' constraint), the final 'opts' value and the
list of tables joined.
"""
joins = [alias]
last = [0]
dupe_set = set()
exclusions = set()
extra_filters = []
int_alias = None
for pos, name in enumerate(names):
if int_alias is not None:
exclusions.add(int_alias)
exclusions.add(alias)
last.append(len(joins))
if name == 'pk':
name = opts.pk.name
try:
field, model, direct, m2m = opts.get_field_by_name(name)
except FieldDoesNotExist:
for f in opts.fields:
if allow_explicit_fk and name == f.attname:
# XXX: A hack to allow foo_id to work in values() for
# backwards compatibility purposes. If we dropped that
# feature, this could be removed.
field, model, direct, m2m = opts.get_field_by_name(f.name)
break
else:
names = opts.get_all_field_names() + self.aggregate_select.keys()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
if not allow_many and (m2m or not direct):
for alias in joins:
self.unref_alias(alias)
raise MultiJoin(pos + 1)
if model:
# The field lives on a base class of the current model.
# Skip the chain of proxy to the concrete proxied model
proxied_model = opts.concrete_model
for int_model in opts.get_base_chain(model):
if int_model is proxied_model:
opts = int_model._meta
else:
lhs_col = opts.parents[int_model].column
dedupe = lhs_col in opts.duplicate_targets
if dedupe:
exclusions.update(self.dupe_avoidance.get(
(id(opts), lhs_col), ()))
dupe_set.add((opts, lhs_col))
opts = int_model._meta
alias = self.join((alias, opts.db_table, lhs_col,
opts.pk.column), exclusions=exclusions)
joins.append(alias)
exclusions.add(alias)
for (dupe_opts, dupe_col) in dupe_set:
self.update_dupe_avoidance(dupe_opts, dupe_col,
alias)
cached_data = opts._join_cache.get(name)
orig_opts = opts
dupe_col = direct and field.column or field.field.column
dedupe = dupe_col in opts.duplicate_targets
if dupe_set or dedupe:
if dedupe:
dupe_set.add((opts, dupe_col))
exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col),
()))
if process_extras and hasattr(field, 'extra_filters'):
extra_filters.extend(field.extra_filters(names, pos, negate))
if direct:
if m2m:
# Many-to-many field defined on the current model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.get_field_by_name(
field.m2m_target_field_name())[0].column
to_col1 = field.m2m_column_name()
opts = field.rel.to._meta
table2 = opts.db_table
from_col2 = field.m2m_reverse_name()
to_col2 = opts.get_field_by_name(
field.m2m_reverse_target_field_name())[0].column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
if int_alias == table2 and from_col2 == to_col2:
joins.append(int_alias)
alias = int_alias
else:
alias = self.join(
(int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
elif field.rel:
# One-to-one or many-to-one field
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
opts = field.rel.to._meta
target = field.rel.get_related_field()
table = opts.db_table
from_col = field.column
to_col = target.column
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
exclusions=exclusions, nullable=field.null)
joins.append(alias)
else:
# Non-relation fields.
target = field
break
else:
orig_field = field
field = field.field
if m2m:
# Many-to-many field defined on the target model.
if cached_data:
(table1, from_col1, to_col1, table2, from_col2,
to_col2, opts, target) = cached_data
else:
table1 = field.m2m_db_table()
from_col1 = opts.get_field_by_name(
field.m2m_reverse_target_field_name())[0].column
to_col1 = field.m2m_reverse_name()
opts = orig_field.opts
table2 = opts.db_table
from_col2 = field.m2m_column_name()
to_col2 = opts.get_field_by_name(
field.m2m_target_field_name())[0].column
target = opts.pk
orig_opts._join_cache[name] = (table1, from_col1,
to_col1, table2, from_col2, to_col2, opts,
target)
int_alias = self.join((alias, table1, from_col1, to_col1),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
alias = self.join((int_alias, table2, from_col2, to_col2),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.extend([int_alias, alias])
else:
# One-to-many field (ForeignKey defined on the target model)
if cached_data:
(table, from_col, to_col, opts, target) = cached_data
else:
local_field = opts.get_field_by_name(
field.rel.field_name)[0]
opts = orig_field.opts
table = opts.db_table
from_col = local_field.column
to_col = field.column
# In case of a recursive FK, use the to_field for
# reverse lookups as well
if orig_field.model is local_field.model:
target = opts.get_field_by_name(
field.rel.field_name)[0]
else:
target = opts.pk
orig_opts._join_cache[name] = (table, from_col, to_col,
opts, target)
alias = self.join((alias, table, from_col, to_col),
dupe_multis, exclusions, nullable=True,
reuse=can_reuse)
joins.append(alias)
for (dupe_opts, dupe_col) in dupe_set:
if int_alias is None:
to_avoid = alias
else:
to_avoid = int_alias
self.update_dupe_avoidance(dupe_opts, dupe_col, to_avoid)
if pos != len(names) - 1:
if pos == len(names) - 2:
raise FieldError("Join on field %r not permitted. Did you misspell %r for the lookup type?" % (name, names[pos + 1]))
else:
raise FieldError("Join on field %r not permitted." % name)
return field, target, opts, joins, last, extra_filters
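    # Illustrative sketch (hypothetical models): for a lookup path such as
    # ['parent', 'name'] on a Tag model with a self-referential foreign key,
    # setup_joins() returns the final 'name' field as the target, the chain
    # of table aliases joined along the way, and any extra filters that the
    # traversed fields contributed.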
def trim_joins(self, target, join_list, last, trim, nonnull_check=False):
"""
Sometimes joins at the end of a multi-table sequence can be trimmed. If
the final join is against the same column as we are comparing against,
and is an inner join, we can go back one step in a join chain and
compare against the LHS of the join instead (and then repeat the
optimization). The result, potentially, involves fewer table joins.
The 'target' parameter is the final field being joined to, 'join_list'
is the full list of join aliases.
The 'last' list contains offsets into 'join_list', corresponding to
each component of the filter. Many-to-many relations, for example, add
two tables to the join list and we want to deal with both tables the
same way, so 'last' has an entry for the first of the two tables and
then the table immediately after the second table, in that case.
The 'trim' parameter forces the final piece of the join list to be
trimmed before anything. See the documentation of add_filter() for
details about this.
The 'nonnull_check' parameter is True when we are using inner joins
between tables explicitly to exclude NULL entries. In that case, the
tables shouldn't be trimmed, because the very action of joining to them
alters the result set.
Returns the final active column and table alias and the new active
join_list.
"""
final = len(join_list)
penultimate = last.pop()
if penultimate == final:
penultimate = last.pop()
if trim and final > 1:
extra = join_list[penultimate:]
join_list = join_list[:penultimate]
final = penultimate
penultimate = last.pop()
col = self.alias_map[extra[0]][LHS_JOIN_COL]
for alias in extra:
self.unref_alias(alias)
else:
col = target.column
alias = join_list[-1]
while final > 1:
join = self.alias_map[alias]
if (col != join[RHS_JOIN_COL] or join[JOIN_TYPE] != self.INNER or
nonnull_check):
break
self.unref_alias(alias)
alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
join_list.pop()
final -= 1
if final == penultimate:
penultimate = last.pop()
return col, alias, join_list
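    # Sketch of the trimming above (assumed aliases): if the final join in
    # join_list is an INNER join whose right-hand column is the one being
    # compared (e.g. the related table's primary key), the loop steps back
    # and compares against the left-hand foreign key column instead,
    # dropping one table from the query.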
def update_dupe_avoidance(self, opts, col, alias):
"""
For a column that is one of multiple pointing to the same table, update
the internal data structures to note that this alias shouldn't be used
for those other columns.
"""
ident = id(opts)
for name in opts.duplicate_targets[col]:
try:
self.dupe_avoidance[ident, name].add(alias)
except KeyError:
self.dupe_avoidance[ident, name] = set([alias])
def split_exclude(self, filter_expr, prefix, can_reuse):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
"""
query = Query(self.model)
query.add_filter(filter_expr, can_reuse=can_reuse)
query.bump_prefix()
query.clear_ordering(True)
query.set_start(prefix)
# Adding extra check to make sure the selected field will not be null
# since we are adding a IN <subquery> clause. This prevents the
# database from tripping over IN (...,NULL,...) selects and returning
# nothing
alias, col = query.select[0]
query.where.add((Constraint(alias, col, None), 'isnull', False), AND)
self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
can_reuse=can_reuse)
# If there's more than one join in the inner query (before any initial
# bits were trimmed -- which means the last active table is more than
# two places into the alias list), we need to also handle the
# possibility that the earlier joins don't match anything by adding a
# comparison to NULL (e.g. in
# Tag.objects.exclude(parent__parent__name='t1'), a tag with no parent
# would otherwise be overlooked).
active_positions = [pos for (pos, count) in
enumerate(query.alias_refcount.itervalues()) if count]
if active_positions[-1] > 1:
self.add_filter(('%s__isnull' % prefix, False), negate=True,
trim=True, can_reuse=can_reuse)
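    # Rough shape of the result (a sketch, not exact SQL): for
    # Tag.objects.exclude(parent__parent__name='t1') the outer query gains
    # "NOT (parent_id IN (SELECT ... WHERE ... IS NOT NULL))", and the
    # second negated isnull filter added above keeps rows whose earlier
    # joins match nothing from being mishandled.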
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
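    # Example of the clamping (a sketch): starting from low_mark=0 and
    # high_mark=None, set_limits(low=2, high=7) leaves low_mark=2 and
    # high_mark=7 (roughly "LIMIT 5 OFFSET 2"); a later set_limits(low=1)
    # then clamps low_mark to min(7, 2 + 1) == 3.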
def clear_limits(self):
"""
Clears any existing limits.
"""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = []
self.select_fields = []
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
for name in field_names:
field, target, u2, joins, u3, u4 = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, False, allow_m2m,
True)
final_alias = joins[-1]
col = target.column
if len(joins) > 1:
join = self.alias_map[final_alias]
if col == join[RHS_JOIN_COL]:
self.unref_alias(final_alias)
final_alias = join[LHS_ALIAS]
col = join[LHS_JOIN_COL]
joins = joins[:-1]
self.promote_alias_chain(joins[1:])
self.select.append((final_alias, col))
self.select_fields.append(field)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
names = opts.get_all_field_names() + self.extra.keys() + self.aggregate_select.keys()
names.sort()
raise FieldError("Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names)))
self.remove_inherited_models()
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or ordinals,
corresponding to column positions in the 'select' list.
If 'ordering' is empty, all ordering is cleared from the query.
"""
errors = []
for item in ordering:
if not ORDER_PATTERN.match(item):
errors.append(item)
if errors:
raise FieldError('Invalid order_by arguments: %s' % errors)
if ordering:
self.order_by.extend(ordering)
else:
self.default_ordering = False
def clear_ordering(self, force_empty=False):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
self.extra_order_by = ()
if force_empty:
self.default_ordering = False
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
self.group_by = []
for sel in self.select:
self.group_by.append(sel)
def add_count_column(self):
"""
Converts the query to do count(...) or count(distinct(pk)) in order to
get its size.
"""
if not self.distinct:
if not self.select:
count = self.aggregates_module.Count('*', is_summary=True)
else:
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select': %r" % self.select
count = self.aggregates_module.Count(self.select[0])
else:
opts = self.model._meta
if not self.select:
count = self.aggregates_module.Count((self.join((None, opts.db_table, None, None)), opts.pk.column),
is_summary=True, distinct=True)
else:
# Because of SQL portability issues, multi-column, distinct
# counts need a sub-query -- see get_count() for details.
assert len(self.select) == 1, \
"Cannot add count col with multiple cols in 'select'."
count = self.aggregates_module.Count(self.select[0], distinct=True)
# Distinct handling is done in Count(), so don't do it at this
# level.
self.distinct = False
# Set only aggregate to be the count column.
# Clear out the select cache to reflect the new unmasked aggregates.
self.aggregates = {None: count}
self.set_aggregate_mask(None)
self.group_by = None
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
field_dict = {}
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
self.related_select_cols = []
self.related_select_fields = []
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = SortedDict()
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
entry = force_unicode(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
entry_params.append(param_iter.next())
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
# This is order preserving, since self.extra_select is a SortedDict.
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
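    # Sketch of the placeholder pairing (assumed values):
    # add_extra(select={'is_recent': 'pub_date > %s'},
    #           select_params=(date(2006, 1, 1),), ...) stores the entry as
    # ('pub_date > %s', [datetime.date(2006, 1, 1)]) so later edits to the
    # select dictionary keep placeholders and parameters aligned.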
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set data structure. We do the foo__bar
        # splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
self.deferred_loading = existing.difference(field_names), False
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if 'pk' in field_names:
field_names.remove('pk')
field_names.add(self.model._meta.pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = field_names, False
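    # Sketch of the two modes (hypothetical field names): starting from
    # (set(), True), add_deferred_loading(['body']) gives ({'body'}, True);
    # a subsequent add_immediate_loading(['title', 'body']) switches to
    # immediate mode and drops the deferred name: ({'title'}, False).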
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
models to a set of names in those fields that will be loaded. If a
        model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
"""
collection = {}
self.deferred_to_data(collection, self.get_loaded_field_names_cb)
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
        Callback used by get_loaded_field_names().
"""
target[model] = set([f.name for f in fields])
def set_aggregate_mask(self, names):
"Set the mask of aggregates that will actually be returned by the SELECT"
if names is None:
self.aggregate_select_mask = None
else:
self.aggregate_select_mask = set(names)
self._aggregate_select_cache = None
def set_extra_mask(self, names):
"""
        Set the mask of extra select items that will be returned by SELECT.
        We don't actually remove them from the Query since they might be
        used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def _aggregate_select(self):
"""The SortedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
if self._aggregate_select_cache is not None:
return self._aggregate_select_cache
elif self.aggregate_select_mask is not None:
self._aggregate_select_cache = SortedDict([
(k,v) for k,v in self.aggregates.items()
if k in self.aggregate_select_mask
])
return self._aggregate_select_cache
else:
return self.aggregates
aggregate_select = property(_aggregate_select)
def _extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
elif self.extra_select_mask is not None:
self._extra_select_cache = SortedDict([
(k,v) for k,v in self.extra.items()
if k in self.extra_select_mask
])
return self._extra_select_cache
else:
return self.extra
extra_select = property(_extra_select)
def set_start(self, start):
"""
Sets the table from which to start joining. The start position is
specified by the related attribute from the base model. This will
        automatically set the select column to be the column linked from the
previous table.
This method is primarily for internal use and the error checking isn't
as friendly as add_filter(). Mostly useful for querying directly
        against the join table of a many-to-many relation in a subquery.
"""
opts = self.model._meta
alias = self.get_initial_alias()
field, col, opts, joins, last, extra = self.setup_joins(
start.split(LOOKUP_SEP), opts, alias, False)
select_col = self.alias_map[joins[1]][LHS_JOIN_COL]
select_alias = alias
# The call to setup_joins added an extra reference to everything in
# joins. Reverse that.
for alias in joins:
self.unref_alias(alias)
# We might be able to trim some joins from the front of this query,
# providing that we only traverse "always equal" connections (i.e. rhs
# is *always* the same value as lhs).
for alias in joins[1:]:
join_info = self.alias_map[alias]
if (join_info[LHS_JOIN_COL] != select_col
or join_info[JOIN_TYPE] != self.INNER):
break
self.unref_alias(select_alias)
select_alias = join_info[RHS_ALIAS]
select_col = join_info[RHS_JOIN_COL]
self.select = [(select_alias, select_col)]
self.remove_inherited_models()
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == '-':
return field[1:], dirn[1]
return field, dirn[0]
def setup_join_cache(sender, **kwargs):
"""
The information needed to join between model fields is something that is
invariant over the life of the model, so we cache it in the model's Options
class, rather than recomputing it all the time.
This method initialises the (empty) cache when the model is created.
"""
sender._meta._join_cache = {}
signals.class_prepared.connect(setup_join_cache)
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
"""
if key in data:
data[key].add(value)
else:
data[key] = set([value])
| apache-2.0 | 8,920,453,358,841,485,000 | 42.204625 | 133 | 0.568494 | false |
ptisserand/ansible | lib/ansible/modules/network/nxos/nxos_udld.py | 16 | 7946 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_udld
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages UDLD global configuration params.
description:
- Manages UDLD global configuration params.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Module will fail if the udld feature has not been previously enabled.
options:
aggressive:
description:
- Toggles aggressive mode.
choices: ['enabled','disabled']
msg_time:
description:
- Message time in seconds for UDLD packets or keyword 'default'.
reset:
description:
- Ability to reset all ports shut down by UDLD. 'state' parameter
cannot be 'absent' when this is present.
type: bool
default: 'no'
state:
description:
- Manage the state of the resource. When set to 'absent',
aggressive and msg_time are set to their default values.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure udld aggressive mode is globally disabled and set the global message interval to 20
- nxos_udld:
aggressive: disabled
msg_time: 20
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# Ensure agg mode is globally enabled and msg time is 15
- nxos_udld:
aggressive: enabled
msg_time: 15
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
existing:
description:
- k/v pairs of existing udld configuration
returned: always
type: dict
sample: {"aggressive": "disabled", "msg_time": "15"}
end_state:
description: k/v pairs of udld configuration after module execution
returned: always
type: dict
sample: {"aggressive": "enabled", "msg_time": "40"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["udld message-time 40", "udld aggressive"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import get_capabilities, nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
PARAM_TO_DEFAULT_KEYMAP = {
'msg_time': '15',
}
def execute_show_command(command, module, command_type='cli_show'):
device_info = get_capabilities(module)
network_api = device_info.get('network_api', 'nxapi')
if network_api == 'cliconf':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif network_api == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def get_commands_config_udld_global(delta, reset, existing):
commands = []
for param, value in delta.items():
if param == 'aggressive':
command = 'udld aggressive' if value == 'enabled' else 'no udld aggressive'
commands.append(command)
elif param == 'msg_time':
if value == 'default':
if existing.get('msg_time') != PARAM_TO_DEFAULT_KEYMAP.get('msg_time'):
commands.append('no udld message-time')
else:
commands.append('udld message-time ' + value)
if reset:
command = 'udld reset'
commands.append(command)
return commands
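# Sketch of the mapping (assumed values): delta={'aggressive': 'enabled',
# 'msg_time': '25'} with reset=False yields
# ['udld aggressive', 'udld message-time 25'].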
def get_commands_remove_udld_global(existing):
commands = []
if existing.get('aggressive') == 'enabled':
command = 'no udld aggressive'
commands.append(command)
if existing.get('msg_time') != PARAM_TO_DEFAULT_KEYMAP.get('msg_time'):
command = 'no udld message-time'
commands.append(command)
return commands
def get_udld_global(module):
command = 'show udld global'
udld_table = execute_show_command(command, module)[0]
status = str(udld_table.get('udld-global-mode', None))
if status == 'enabled-aggressive':
aggressive = 'enabled'
else:
aggressive = 'disabled'
interval = str(udld_table.get('message-interval', None))
udld = dict(msg_time=interval, aggressive=aggressive)
return udld
def main():
argument_spec = dict(
aggressive=dict(required=False, choices=['enabled', 'disabled']),
msg_time=dict(required=False, type='str'),
reset=dict(required=False, type='bool'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
aggressive = module.params['aggressive']
msg_time = module.params['msg_time']
reset = module.params['reset']
state = module.params['state']
if reset and state == 'absent':
module.fail_json(msg="state must be present when using reset flag.")
args = dict(aggressive=aggressive, msg_time=msg_time, reset=reset)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_udld_global(module)
end_state = existing
delta = set(proposed.items()).difference(existing.items())
changed = False
commands = []
if state == 'present':
if delta:
command = get_commands_config_udld_global(dict(delta), reset, existing)
commands.append(command)
elif state == 'absent':
command = get_commands_remove_udld_global(existing)
if command:
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_udld_global(module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 | -1,384,887,053,501,105,400 | 28.649254 | 88 | 0.626227 | false |
peterm-itr/edx-platform | cms/djangoapps/contentstore/features/common.py | 4 | 12232 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
import os
from lettuce import world, step
from nose.tools import assert_true, assert_in # pylint: disable=no-name-in-module
from django.conf import settings
from student.roles import CourseStaffRole, CourseInstructorRole, GlobalStaff
from student.models import get_user
from selenium.webdriver.common.keys import Keys
from logging import getLogger
from student.tests.factories import AdminFactory
from student import auth
logger = getLogger(__name__)
from terrain.browser import reset_data
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(_step):
# To make this go to port 8001, put
# LETTUCE_SERVER_PORT = 8001
# in your settings.py file.
world.visit('/')
signin_css = 'a.action-signin'
assert world.is_css_present(signin_css)
@step('I am logged into Studio$')
def i_am_logged_into_studio(_step):
log_into_studio()
@step('I confirm the alert$')
def i_confirm_with_ok(_step):
world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(_step, category):
if category == 'section':
css = 'a.action.delete-section-button'
elif category == 'subsection':
css = 'a.action.delete-subsection-button'
else:
assert False, 'Invalid category: %s' % category
world.css_click(css)
@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(_step):
open_new_course()
@step('I have populated a new course in Studio$')
def i_have_populated_a_new_course(_step):
world.clear_courses()
course = world.CourseFactory.create()
world.scenario_dict['COURSE'] = course
section = world.ItemFactory.create(parent_location=course.location)
world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',
)
user = create_studio_user(is_staff=False)
add_course_author(user, course)
log_into_studio()
world.css_click('a.course-link')
world.wait_for_js_to_load()
@step('(I select|s?he selects) the new course')
def select_new_course(_step, whom):
course_link_css = 'a.course-link'
world.css_click(course_link_css)
@step(u'I press the "([^"]*)" notification button$')
def press_the_notification_button(_step, name):
# Because the notification uses a CSS transition,
# Selenium will always report it as being visible.
# This makes it very difficult to successfully click
# the "Save" button at the UI level.
# Instead, we use JavaScript to reliably click
# the button.
btn_css = 'div#page-notification a.action-%s' % name.lower()
world.trigger_event(btn_css, event='focus')
world.browser.execute_script("$('{}').click()".format(btn_css))
world.wait_for_ajax_complete()
@step('I change the "(.*)" field to "(.*)"$')
def i_change_field_to_value(_step, field, value):
field_css = '#%s' % '-'.join([s.lower() for s in field.split()])
ele = world.css_find(field_css).first
ele.fill(value)
ele._element.send_keys(Keys.ENTER)
@step('I reset the database')
def reset_the_db(_step):
"""
When running Lettuce tests using examples (i.e. "Confirmation is
shown on save" in course-settings.feature), the normal hooks
aren't called between examples. reset_data should run before each
scenario to flush the test database. When this doesn't happen we
get errors due to trying to insert a non-unique entry. So instead,
we delete the database manually. This has the effect of removing
any users and courses that have been created during the test run.
"""
reset_data(None)
@step('I see a confirmation that my changes have been saved')
def i_see_a_confirmation(step):
confirmation_css = '#alert-confirmation'
assert world.is_css_present(confirmation_css)
def open_new_course():
world.clear_courses()
create_studio_user()
log_into_studio()
create_a_course()
def create_studio_user(
uname='robot',
email='robot+studio@edx.org',
password='test',
is_staff=False):
studio_user = world.UserFactory(
username=uname,
email=email,
password=password,
is_staff=is_staff)
registration = world.RegistrationFactory(user=studio_user)
registration.register(studio_user)
registration.activate()
return studio_user
def fill_in_course_info(
name='Robot Super Course',
org='MITx',
num='101',
run='2013_Spring'):
world.css_fill('.new-course-name', name)
world.css_fill('.new-course-org', org)
world.css_fill('.new-course-number', num)
world.css_fill('.new-course-run', run)
def log_into_studio(
uname='robot',
email='robot+studio@edx.org',
password='test',
name='Robot Studio'):
world.log_in(username=uname, password=password, email=email, name=name)
# Navigate to the studio dashboard
world.visit('/')
assert_in(uname, world.css_text('h2.title', timeout=10))
def add_course_author(user, course):
"""
Add the user to the instructor group of the course
so they will have the permissions to see it in studio
"""
global_admin = AdminFactory()
for role in (CourseStaffRole, CourseInstructorRole):
auth.add_users(global_admin, role(course.id), user)
def create_a_course():
course = world.CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
world.scenario_dict['COURSE'] = course
user = world.scenario_dict.get("USER")
if not user:
user = get_user('robot+studio@edx.org')
add_course_author(user, course)
# Navigate to the studio dashboard
world.visit('/')
course_link_css = 'a.course-link'
world.css_click(course_link_css)
course_title_css = 'span.course-title'
assert_true(world.is_css_present(course_title_css))
def add_section():
world.css_click('.outline .button-new')
assert_true(world.is_css_present('.outline-section .xblock-field-value'))
def set_date_and_time(date_css, desired_date, time_css, desired_time, key=None):
set_element_value(date_css, desired_date, key)
world.wait_for_ajax_complete()
set_element_value(time_css, desired_time, key)
world.wait_for_ajax_complete()
def set_element_value(element_css, element_value, key=None):
element = world.css_find(element_css).first
element.fill(element_value)
    # hit TAB or the provided key to trigger saving the content
if key is not None:
element._element.send_keys(getattr(Keys, key)) # pylint: disable=protected-access
else:
element._element.send_keys(Keys.TAB) # pylint: disable=protected-access
@step('I have enabled the (.*) advanced module$')
def i_enabled_the_advanced_module(step, module):
step.given('I have opened a new course section in Studio')
world.css_click('.nav-course-settings')
world.css_click('.nav-course-settings-advanced a')
type_in_codemirror(0, '["%s"]' % module)
press_the_notification_button(step, 'Save')
@world.absorb
def create_unit_from_course_outline():
"""
Expands the section and clicks on the New Unit link.
The end result is the page where the user is editing the new unit.
"""
css_selectors = [
'.outline-subsection .expand-collapse', '.outline-subsection .button-new'
]
for selector in css_selectors:
world.css_click(selector)
world.wait_for_mathjax()
world.wait_for_xmodule()
world.wait_for_loading()
assert world.is_css_present('ul.new-component-type')
@world.absorb
def wait_for_loading():
"""
Waits for the loading indicator to be hidden.
"""
world.wait_for(lambda _driver: len(world.browser.find_by_css('div.ui-loading.is-hidden')) > 0)
@step('I have clicked the new unit button$')
@step(u'I am in Studio editing a new unit$')
def edit_new_unit(step):
step.given('I have populated a new course in Studio')
create_unit_from_course_outline()
@step('the save notification button is disabled')
def save_button_disabled(step):
button_css = '.action-save'
disabled = 'is-disabled'
assert world.css_has_class(button_css, disabled)
@step('the "([^"]*)" button is disabled')
def button_disabled(step, value):
button_css = 'input[value="%s"]' % value
assert world.css_has_class(button_css, 'is-disabled')
def _do_studio_prompt_action(intent, action):
"""
Wait for a studio prompt to appear and press the specified action button
See cms/static/js/views/feedback_prompt.js for implementation
"""
assert intent in [
'warning',
'error',
'confirmation',
'announcement',
'step-required',
'help',
'mini',
]
assert action in ['primary', 'secondary']
world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))
action_css = 'li.nav-item > a.action-{}'.format(action)
world.trigger_event(action_css, event='focus')
world.browser.execute_script("$('{}').click()".format(action_css))
world.wait_for_ajax_complete()
world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))
@world.absorb
def confirm_studio_prompt():
_do_studio_prompt_action('warning', 'primary')
@step('I confirm the prompt')
def confirm_the_prompt(step):
confirm_studio_prompt()
@step(u'I am shown a prompt$')
def i_am_shown_a_notification(step):
assert world.is_css_present('.wrapper-prompt')
def type_in_codemirror(index, text, find_prefix="$"):
script = """
var cm = {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror;
cm.getInputField().focus();
cm.setValue(arguments[0]);
cm.getInputField().blur();""".format(index=index, find_prefix=find_prefix)
world.browser.driver.execute_script(script, str(text))
world.wait_for_ajax_complete()
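# Example (a sketch): type_in_codemirror(0, '["word_cloud"]') fills the first
# CodeMirror editor on the page; the zero-based index selects among multiple
# editors and find_prefix scopes the jQuery lookup.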
def get_codemirror_value(index=0, find_prefix="$"):
return world.browser.driver.execute_script(
"""
return {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror.getValue();
""".format(index=index, find_prefix=find_prefix)
)
def attach_file(filename, sub_path):
path = os.path.join(TEST_ROOT, sub_path, filename)
world.browser.execute_script("$('input.file-input').css('display', 'block')")
assert_true(os.path.exists(path))
world.browser.attach_file('file', os.path.abspath(path))
def upload_file(filename, sub_path=''):
attach_file(filename, sub_path)
button_css = '.wrapper-modal-window-assetupload .action-upload'
world.css_click(button_css)
@step(u'"([^"]*)" logs in$')
def other_user_login(step, name):
step.given('I log out')
world.visit('/')
signin_css = 'a.action-signin'
world.is_css_present(signin_css)
world.css_click(signin_css)
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill(name + '@edx.org')
login_form.find_by_name('password').fill("test")
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
assert_true(world.is_css_present('.new-course-button'))
world.scenario_dict['USER'] = get_user(name + '@edx.org')
@step(u'the user "([^"]*)" exists( as a course (admin|staff member|is_staff))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
email = name + '@edx.org'
user = create_studio_user(uname=name, password="test", email=email)
if has_extra_perms:
if role_name == "is_staff":
GlobalStaff().add_users(user)
else:
if role_name == "admin":
# admins get staff privileges, as well
roles = (CourseStaffRole, CourseInstructorRole)
else:
roles = (CourseStaffRole,)
course_key = world.scenario_dict["COURSE"].id
global_admin = AdminFactory()
for role in roles:
auth.add_users(global_admin, role(course_key), user)
@step('I log out')
def log_out(_step):
world.visit('logout')
| agpl-3.0 | 7,195,979,226,638,403,000 | 29.888889 | 100 | 0.663506 | false |
pyocd/pyOCD | pyocd/target/builtin/target_MKE15Z256xxx7.py | 3 | 6018 | # pyOCD debugger
# Copyright (c) 2018-2019 Arm Limited
# Copyright (c) 2017 NXP
# Copyright (c) 2016 Freescale Semiconductor, Inc.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..family.target_kinetis import Kinetis
from ..family.flash_kinetis import Flash_Kinetis
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
RCM_MR = 0x4007f010
RCM_MR_BOOTROM_MASK = 0x6
FLASH_ALGO = {
'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x4829b510, 0x60414927, 0x60814928, 0x22806801, 0x22204391, 0x60014311, 0x44484825, 0xf84cf000,
0xd0002800, 0xbd102001, 0x47702000, 0xb5104820, 0x44484920, 0xf88ef000, 0xd1042800, 0x2100481c,
0xf0004448, 0xbd10f948, 0x4c19b570, 0x444c4605, 0x4b184601, 0x68e24620, 0xf8b5f000, 0xd1052800,
0x46292300, 0x68e24620, 0xf93ff000, 0xb570bd70, 0x460b460c, 0x46014606, 0xb084480d, 0x44484615,
0xf8e4f000, 0xd10a2800, 0x90029001, 0x48082101, 0x462b9100, 0x46314622, 0xf0004448, 0xb004f96d,
0x0000bd70, 0xd928c520, 0x40052000, 0x0000ffff, 0x00000004, 0x6b65666b, 0xd00b2800, 0x68c949dd,
0x0f090109, 0xd007290f, 0x00494adb, 0x5a51447a, 0xe0030289, 0x47702004, 0x04892101, 0x2300b430,
0x60416003, 0x02cc2101, 0x608160c4, 0x7a0d49d3, 0x40aa158a, 0x7ac96142, 0x61816103, 0x06892105,
0x62016244, 0x2000bc30, 0x28004770, 0x6101d002, 0x47702000, 0x47702004, 0x48c94602, 0x210168c0,
0x43080289, 0x60c849c6, 0x48c64770, 0x70012170, 0x70012180, 0x06097801, 0x7800d5fc, 0xd5010681,
0x47702067, 0xd50106c1, 0x47702068, 0xd0fc07c0, 0x47702069, 0xd1012800, 0x47702004, 0x4604b510,
0x48b94ab8, 0x48b96050, 0xd0014281, 0xe000206b, 0x28002000, 0x4620d107, 0xffd7f7ff, 0x46204603,
0xffcaf7ff, 0xbd104618, 0xd1012800, 0x47702004, 0x4614b510, 0x60622200, 0x60e260a2, 0x61626122,
0x61e261a2, 0x68c16021, 0x68816061, 0xf0006840, 0x60a0f953, 0x60e02008, 0x61606120, 0x200461a0,
0x200061e0, 0xb5ffbd10, 0x4615b089, 0x466a460c, 0xf7ff9809, 0x462affd9, 0x9b044621, 0xf0009809,
0x0007f90c, 0x9c00d130, 0x19659e01, 0x46311e6d, 0xf0004628, 0x2900f931, 0x1c40d002, 0x1e454370,
0xd81d42ac, 0x20090221, 0x06000a09, 0x488d1809, 0x498e6041, 0x4288980c, 0x206bd001, 0x2000e000,
0xd1112800, 0xf7ff9809, 0x4607ff80, 0x69009809, 0xd0002800, 0x2f004780, 0x19a4d102, 0xd9e142ac,
0xf7ff9809, 0x4638ff69, 0xbdf0b00d, 0xd1012a00, 0x47702004, 0xb089b5ff, 0x461e4614, 0x466a460d,
0xf7ff9809, 0x4632ff91, 0x9b034629, 0xf0009809, 0x0007f8c4, 0x9d00d12d, 0xd0262e00, 0x4871cc02,
0x99036081, 0xd0022904, 0xd0072908, 0x022ae00e, 0x0a122103, 0x18510649, 0xe0076041, 0x60c1cc02,
0x2107022a, 0x06090a12, 0x60411851, 0xf7ff9809, 0x4607ff3c, 0x69009809, 0xd0002800, 0x2f004780,
0x9803d103, 0x1a361945, 0x9809d1d8, 0xff24f7ff, 0xb00d4638, 0x2800bdf0, 0x4a5dd005, 0x18890409,
0x60514a58, 0x2004e721, 0xb5ff4770, 0x4614b08b, 0x460d461e, 0x980b466a, 0xff46f7ff, 0x46294622,
0x980b9b05, 0xf879f000, 0xd1332800, 0x4629466a, 0xf7ff980b, 0x9d00ff39, 0x90089802, 0x42404269,
0x424f4001, 0xd10142af, 0x183f9808, 0xd0202c00, 0x90090230, 0x42a61b7e, 0x4626d900, 0x99054630,
0xf88af000, 0x2101022a, 0x06090a12, 0x493d1852, 0x9a09604a, 0x43100400, 0x608830ff, 0xf7ff980b,
0x2800fee4, 0x9808d106, 0x19ad1ba4, 0x2c00183f, 0x2000d1e0, 0xbdf0b00f, 0xd1012b00, 0x47702004,
0xb089b5ff, 0x461d4616, 0x466a460c, 0x98099f12, 0xfefaf7ff, 0x46214632, 0x98099b07, 0xf82df000,
0xd11d2800, 0x2e009c00, 0x492ad01a, 0x18470638, 0x20010221, 0x06400a09, 0x48221809, 0x60876041,
0x60c16829, 0xf7ff9809, 0x2800feb0, 0x9913d00a, 0xd0002900, 0x9914600c, 0xd0012900, 0x600a2200,
0xbdf0b00d, 0x1a769907, 0x00890889, 0x9907194d, 0x2e00190c, 0xb00dd1dc, 0x2800bdf0, 0x2004d101,
0xb4104770, 0x460c1e5b, 0xd101421c, 0xd002421a, 0x2065bc10, 0x68034770, 0xd804428b, 0x18896840,
0x42881818, 0xbc10d202, 0x47702066, 0x2000bc10, 0x00004770, 0x40048040, 0x000003bc, 0x40020020,
0xf0003000, 0x40020000, 0x44ffffff, 0x6b65666b, 0x4000ffff, 0x00ffffff, 0x460bb530, 0x20004601,
0x24012220, 0x460de009, 0x429d40d5, 0x461dd305, 0x1b494095, 0x40954625, 0x46151940, 0x2d001e52,
0xbd30dcf1, 0x40020004, 0x40020010, 0x00100008, 0x00200018, 0x00400030, 0x00800060, 0x010000c0,
0x02000180, 0x04000300, 0x00000600, 0x00000000,
],
'pc_init' : 0x20000021,
'pc_unInit': 0x20000049,
'pc_program_page': 0x2000008F,
'pc_erase_sector': 0x20000069,
'pc_eraseAll' : 0x2000004D,
'static_base' : 0x20000000 + 0x00000020 + 0x000004ac,
'begin_stack' : 0x20000000 + 0x00000800,
'begin_data' : 0x20000000 + 0x00000A00,
'page_size' : 0x00000800,
'analyzer_supported' : True,
'analyzer_address' : 0x1ffff000, # Analyzer 0x1ffff000..0x1ffff600
'page_buffers' : [0x20003000, 0x20004000], # Enable double buffering
'min_program_length' : 8,
}
class KE15Z7(Kinetis):
MEMORY_MAP = MemoryMap(
FlashRegion( start=0, length=0x40000, blocksize=0x800, is_boot_memory=True,
algo=FLASH_ALGO, flash_class=Flash_Kinetis),
RamRegion( start=0x1fffe000, length=0x8000)
)
def __init__(self, session):
super(KE15Z7, self).__init__(session, self.MEMORY_MAP)
self._svd_location = SVDFile.from_builtin("MKE15Z7.svd")
def post_connect_hook(self):
# Disable ROM vector table remapping.
self.write32(RCM_MR, RCM_MR_BOOTROM_MASK)
| apache-2.0 | -3,661,056,166,638,669,000 | 58 | 102 | 0.759056 | false |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.4/django/views/generic/create_update.py | 87 | 8928 | from django.forms.models import ModelFormMetaclass, ModelForm
from django.template import RequestContext, loader
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.xheaders import populate_xheaders
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.utils.translation import ugettext
from django.contrib.auth.views import redirect_to_login
from django.views.generic import GenericViewError
from django.contrib import messages
import warnings
warnings.warn(
'Function-based generic views have been deprecated; use class-based views instead.',
DeprecationWarning
)
def apply_extra_context(extra_context, context):
"""
Adds items from extra_context dict to context. If a value in extra_context
is callable, then it is called and the result is added to context.
"""
for key, value in extra_context.iteritems():
if callable(value):
context[key] = value()
else:
context[key] = value
def get_model_and_form_class(model, form_class):
"""
Returns a model and form class based on the model and form_class
parameters that were passed to the generic view.
If ``form_class`` is given then its associated model will be returned along
with ``form_class`` itself. Otherwise, if ``model`` is given, ``model``
itself will be returned along with a ``ModelForm`` class created from
``model``.
"""
if form_class:
return form_class._meta.model, form_class
if model:
# The inner Meta class fails if model = model is used for some reason.
tmp_model = model
# TODO: we should be able to construct a ModelForm without creating
# and passing in a temporary inner class.
class Meta:
model = tmp_model
class_name = model.__name__ + 'Form'
form_class = ModelFormMetaclass(class_name, (ModelForm,), {'Meta': Meta})
return model, form_class
raise GenericViewError("Generic view must be called with either a model or"
" form_class argument.")
def redirect(post_save_redirect, obj):
"""
    Returns an HttpResponseRedirect to ``post_save_redirect``.
``post_save_redirect`` should be a string, and can contain named string-
    substitution placeholders of ``obj`` field names.
If ``post_save_redirect`` is None, then redirect to ``obj``'s URL returned
by ``get_absolute_url()``. If ``obj`` has no ``get_absolute_url`` method,
then raise ImproperlyConfigured.
This function is meant to handle the post_save_redirect parameter to the
``create_object`` and ``update_object`` views.
"""
if post_save_redirect:
return HttpResponseRedirect(post_save_redirect % obj.__dict__)
elif hasattr(obj, 'get_absolute_url'):
return HttpResponseRedirect(obj.get_absolute_url())
else:
raise ImproperlyConfigured(
"No URL to redirect to. Either pass a post_save_redirect"
" parameter to the generic view or define a get_absolute_url"
" method on the Model.")
def lookup_object(model, object_id, slug, slug_field):
"""
Return the ``model`` object with the passed ``object_id``. If
``object_id`` is None, then return the object whose ``slug_field``
equals the passed ``slug``. If ``slug`` and ``slug_field`` are not passed,
then raise Http404 exception.
"""
lookup_kwargs = {}
if object_id:
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
raise GenericViewError(
"Generic view must be called with either an object_id or a"
" slug/slug_field.")
try:
return model.objects.get(**lookup_kwargs)
except ObjectDoesNotExist:
raise Http404("No %s found for %s"
% (model._meta.verbose_name, lookup_kwargs))
def create_object(request, model=None, template_name=None,
template_loader=loader, extra_context=None, post_save_redirect=None,
login_required=False, context_processors=None, form_class=None):
"""
Generic object-creation function.
Templates: ``<app_label>/<model_name>_form.html``
Context:
form
the form for the object
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
if request.method == 'POST':
form = form_class(request.POST, request.FILES)
if form.is_valid():
new_object = form.save()
msg = ugettext("The %(verbose_name)s was created successfully.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, new_object)
else:
form = form_class()
# Create the template, context, response
if not template_name:
template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
}, context_processors)
apply_extra_context(extra_context, c)
return HttpResponse(t.render(c))
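# Illustrative wiring for this (deprecated) view; a sketch where BookForm is
# an assumed ModelForm and the URLs are hypothetical:
# from django.conf.urls import patterns, url
# urlpatterns = patterns('',
#     url(r'^books/add/$', 'django.views.generic.create_update.create_object',
#         {'form_class': BookForm, 'post_save_redirect': '/books/'}),
# )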
def update_object(request, model=None, object_id=None, slug=None,
slug_field='slug', template_name=None, template_loader=loader,
extra_context=None, post_save_redirect=None, login_required=False,
context_processors=None, template_object_name='object',
form_class=None):
"""
Generic object-update function.
Templates: ``<app_label>/<model_name>_form.html``
Context:
form
the form for the object
object
the original object being edited
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
model, form_class = get_model_and_form_class(model, form_class)
obj = lookup_object(model, object_id, slug, slug_field)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=obj)
if form.is_valid():
obj = form.save()
msg = ugettext("The %(verbose_name)s was updated successfully.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return redirect(post_save_redirect, obj)
else:
form = form_class(instance=obj)
if not template_name:
template_name = "%s/%s_form.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'form': form,
template_object_name: obj,
}, context_processors)
apply_extra_context(extra_context, c)
response = HttpResponse(t.render(c))
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
return response
def delete_object(request, model, post_delete_redirect, object_id=None,
slug=None, slug_field='slug', template_name=None,
template_loader=loader, extra_context=None, login_required=False,
context_processors=None, template_object_name='object'):
"""
Generic object-delete function.
    The given template will be used to confirm deletion if this view is
    fetched using GET; for safety, deletion will only be performed if this
view is POSTed.
Templates: ``<app_label>/<model_name>_confirm_delete.html``
Context:
object
the original object being deleted
"""
if extra_context is None: extra_context = {}
if login_required and not request.user.is_authenticated():
return redirect_to_login(request.path)
obj = lookup_object(model, object_id, slug, slug_field)
if request.method == 'POST':
obj.delete()
msg = ugettext("The %(verbose_name)s was deleted.") %\
{"verbose_name": model._meta.verbose_name}
messages.success(request, msg, fail_silently=True)
return HttpResponseRedirect(post_delete_redirect)
else:
if not template_name:
template_name = "%s/%s_confirm_delete.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
apply_extra_context(extra_context, c)
response = HttpResponse(t.render(c))
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.attname))
return response
| bsd-3-clause | -6,012,595,282,562,014,000 | 39.39819 | 114 | 0.648634 | false |
alexallah/django | django/views/generic/edit.py | 24 | 8343 | from django.core.exceptions import ImproperlyConfigured
from django.forms import models as model_forms
from django.http import HttpResponseRedirect
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
from django.views.generic.detail import (
BaseDetailView, SingleObjectMixin, SingleObjectTemplateResponseMixin,
)
class FormMixin(ContextMixin):
"""Provide a way to show and handle a form in a request."""
initial = {}
form_class = None
success_url = None
prefix = None
def get_initial(self):
"""Return the initial data to use for forms on this view."""
return self.initial.copy()
def get_prefix(self):
"""Return the prefix to use for forms."""
return self.prefix
def get_form_class(self):
"""Return the form class to use."""
return self.form_class
def get_form(self, form_class=None):
"""Return an instance of the form to be used in this view."""
if form_class is None:
form_class = self.get_form_class()
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""Return the keyword arguments for instantiating the form."""
kwargs = {
'initial': self.get_initial(),
'prefix': self.get_prefix(),
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_success_url(self):
"""Return the URL to redirect to after processing a valid form."""
if not self.success_url:
raise ImproperlyConfigured("No URL to redirect to. Provide a success_url.")
return str(self.success_url) # success_url may be lazy
def form_valid(self, form):
"""If the form is valid, redirect to the supplied URL."""
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
"""If the form is invalid, render the invalid form."""
return self.render_to_response(self.get_context_data(form=form))
def get_context_data(self, **kwargs):
"""Insert the form into the context dict."""
if 'form' not in kwargs:
kwargs['form'] = self.get_form()
return super().get_context_data(**kwargs)
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""Provide a way to show and handle a ModelForm in a request."""
fields = None
def get_form_class(self):
"""Return the form class to use in this view."""
if self.fields is not None and self.form_class:
raise ImproperlyConfigured(
"Specifying both 'fields' and 'form_class' is not permitted."
)
if self.form_class:
return self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
if self.fields is None:
raise ImproperlyConfigured(
"Using ModelFormMixin (base class of %s) without "
"the 'fields' attribute is prohibited." % self.__class__.__name__
)
return model_forms.modelform_factory(model, fields=self.fields)
def get_form_kwargs(self):
"""Return the keyword arguments for instantiating the form."""
kwargs = super().get_form_kwargs()
if hasattr(self, 'object'):
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
"""Return the URL to redirect to after processing a valid form."""
if self.success_url:
url = self.success_url.format(**self.object.__dict__)
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def form_valid(self, form):
"""If the form is valid, save the associated model."""
self.object = form.save()
return super().form_valid(form)
class ProcessFormView(View):
"""Render a form on GET and processes it on POST."""
def get(self, request, *args, **kwargs):
"""Handle GET requests: instantiate a blank version of the form."""
return self.render_to_response(self.get_context_data())
def post(self, request, *args, **kwargs):
"""
Handle POST requests: instantiate a form instance with the passed
POST variables and then check if it's valid.
"""
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
    # object; note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""A base view for displaying a form."""
class FormView(TemplateResponseMixin, BaseFormView):
"""A view for displaying a form and rendering a template response."""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
    Base view for creating a new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super().post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
View for creating a new object, with a response rendered by a template.
"""
template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super().get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super().post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""View for updating an object, with a response rendered by a template."""
template_name_suffix = '_form'
class DeletionMixin:
"""Provide the ability to delete objects."""
success_url = None
def delete(self, request, *args, **kwargs):
"""
Call the delete() method on the fetched object and then redirect to the
success URL.
"""
self.object = self.get_object()
success_url = self.get_success_url()
self.object.delete()
return HttpResponseRedirect(success_url)
# Add support for browsers which only accept GET and POST for now.
def post(self, request, *args, **kwargs):
return self.delete(request, *args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url.format(**self.object.__dict__)
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with self.get_object(), with a
response rendered by a template.
"""
template_name_suffix = '_confirm_delete'
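# Usage sketch (Author is an assumed model, 'author-list' an assumed URL name):
# from django.urls import reverse_lazy
# class AuthorDelete(DeleteView):
#     model = Author
#     success_url = reverse_lazy('author-list')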
| bsd-3-clause | -7,877,874,950,149,136,000 | 33.618257 | 87 | 0.618722 | false |
gbaty/pyside2 | tests/signals/decorators_test.py | 3 | 2225 | #!/usr/bin/env python
import unittest
from PySide2.QtCore import QObject, Slot, SIGNAL, SLOT
class MyObject(QObject):
def __init__(self, parent=None):
QObject.__init__(self, parent)
self._slotCalledCount = 0
@Slot()
def mySlot(self):
self._slotCalledCount = self._slotCalledCount + 1
@Slot(int)
@Slot('QString')
def mySlot2(self, arg0):
self._slotCalledCount = self._slotCalledCount + 1
@Slot(name='mySlot3')
def foo(self):
self._slotCalledCount = self._slotCalledCount + 1
@Slot(str, int)
def mySlot4(self, a, b):
self._slotCalledCount = self._slotCalledCount + 1
@Slot(result=int)
def mySlot5(self):
self._slotCalledCount = self._slotCalledCount + 1
@Slot(result=QObject)
def mySlot6(self):
self._slotCalledCount = self._slotCalledCount + 1
class StaticMetaObjectTest(unittest.TestCase):
def testSignalPropagation(self):
o = MyObject()
m = o.metaObject()
self.assertTrue(m.indexOfSlot('mySlot()') > 0)
self.assertTrue(m.indexOfSlot('mySlot2(int)') > 0)
self.assertTrue(m.indexOfSlot('mySlot2(QString)') > 0)
self.assertTrue(m.indexOfSlot('mySlot3()') > 0)
self.assertTrue(m.indexOfSlot('mySlot4(QString,int)') > 0)
def testEmission(self):
o = MyObject()
o.connect(SIGNAL("mySignal()"), o, SLOT("mySlot()"))
o.emit(SIGNAL("mySignal()"))
self.assertTrue(o._slotCalledCount == 1)
def testResult(self):
o = MyObject()
mo = o.metaObject()
i = mo.indexOfSlot('mySlot5()')
m = mo.method(i)
self.assertEqual(m.typeName(), "int")
def testResultObject(self):
o = MyObject()
mo = o.metaObject()
i = mo.indexOfSlot('mySlot6()')
m = mo.method(i)
self.assertEqual(m.typeName(), "QObject*")
class SlotWithoutArgs(unittest.TestCase):
def testError(self):
# It should be an error to call the slot without the
# arguments, as just @Slot would end up in a slot
# accepting argument functions
self.assertRaises(TypeError, Slot, lambda: 3)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 | -6,163,953,406,768,717,000 | 27.896104 | 66 | 0.609888 | false |
ZihengJiang/mxnet | example/autoencoder/model.py | 27 | 2810 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
import numpy as np
import logging
from solver import Solver, Monitor
try:
import cPickle as pickle
except:
import pickle
def extract_feature(sym, args, auxs, data_iter, N, xpu=mx.cpu()):
input_buffs = [mx.nd.empty(shape, ctx=xpu) for k, shape in data_iter.provide_data]
input_names = [k for k, shape in data_iter.provide_data]
args = dict(args, **dict(zip(input_names, input_buffs)))
exe = sym.bind(xpu, args=args, aux_states=auxs)
outputs = [[] for i in exe.outputs]
output_buffs = None
data_iter.hard_reset()
for batch in data_iter:
for data, buff in zip(batch.data, input_buffs):
data.copyto(buff)
exe.forward(is_train=False)
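        # Double-buffered device->host copy (assumed intent of this code):
        # allocate CPU-side buffers on the first batch; on later batches,
        # first drain the previous batch's results from the buffers into
        # `outputs`, then overwrite the buffers below. The extra drain after
        # the loop flushes the final batch.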
if output_buffs is None:
output_buffs = [mx.nd.empty(i.shape, ctx=mx.cpu()) for i in exe.outputs]
else:
for out, buff in zip(outputs, output_buffs):
out.append(buff.asnumpy())
for out, buff in zip(exe.outputs, output_buffs):
out.copyto(buff)
for out, buff in zip(outputs, output_buffs):
out.append(buff.asnumpy())
outputs = [np.concatenate(i, axis=0)[:N] for i in outputs]
return dict(zip(sym.list_outputs(), outputs))
class MXModel(object):
def __init__(self, xpu=mx.cpu(), *args, **kwargs):
self.xpu = xpu
self.loss = None
self.args = {}
self.args_grad = {}
self.args_mult = {}
self.auxs = {}
self.setup(*args, **kwargs)
def save(self, fname):
args_save = {key: v.asnumpy() for key, v in self.args.items()}
with open(fname, 'wb') as fout:
pickle.dump(args_save, fout)
def load(self, fname):
with open(fname, 'rb') as fin:
args_save = pickle.load(fin)
for key, v in args_save.items():
if key in self.args:
self.args[key][:] = v
def setup(self, *args, **kwargs):
raise NotImplementedError("must override this")
| apache-2.0 | 2,725,879,101,608,685,600 | 35.493506 | 86 | 0.639146 | false |
switchboardOp/ansible | lib/ansible/modules/windows/win_msg.py | 22 | 3527 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_msg
version_added: "2.3"
short_description: Sends a message to logged in users on Windows hosts.
description:
- Wraps the msg.exe command in order to send messages to Windows hosts.
options:
to:
description:
- Who to send the message to. Can be a username, sessionname or sessionid.
default: '*'
display_seconds:
description:
- How long to wait for receiver to acknowledge message, in seconds.
default: 10
wait:
description:
      - Whether to wait for users to respond. The module will only wait for the number of seconds specified in display_seconds, or 10 seconds if not specified.
However, if I(wait) is true, the message is sent to each logged on user in turn, waiting for the user to either press 'ok' or for
the timeout to elapse before moving on to the next user.
type: bool
default: 'no'
msg:
description:
- The text of the message to be displayed.
default: Hello world!
author:
- Jon Hawkesworth (@jhawkesworth)
notes:
- This module must run on a windows host, so ensure your play targets windows
hosts, or delegates to a windows host.
- Messages are only sent to the local host where the module is run.
- The module does not support sending to users listed in a file.
- Setting wait to true can result in long run times on systems with many logged in users.
'''
EXAMPLES = r'''
- name: Warn logged in users of impending upgrade
win_msg:
display_seconds: 60
msg: Automated upgrade about to start. Please save your work and log off before {{ deployment_start_time }}
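
# A second, purely illustrative example (session name and timings are
# hypothetical):
- name: Notify the console session and wait for acknowledgement
  win_msg:
    to: console
    msg: Maintenance window starting in 5 minutes.
    wait: yes
    display_seconds: 300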
'''
RETURN = r'''
msg:
    description: Text of the message that was sent.
returned: changed
type: string
sample: Automated upgrade about to start. Please save your work and log off before 22 July 2016 18:00:00
display_seconds:
description: Value of display_seconds module parameter.
returned: success
type: string
sample: 10
rc:
description: The return code of the API call
returned: always
type: int
sample: 0
runtime_seconds:
description: How long the module took to run on the remote windows host.
returned: success
type: string
sample: 22 July 2016 17:45:51
sent_localtime:
description: local time from windows host when the message was sent.
returned: success
type: string
sample: 22 July 2016 17:45:51
wait:
description: Value of wait module parameter.
returned: success
type: boolean
sample: false
'''
| gpl-3.0 | 7,653,764,283,444,003,000 | 33.242718 | 155 | 0.70258 | false |
ChronoMonochrome/android_external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/misc_web_contents_backend.py | 24 | 1494 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from telemetry.core import web_contents
from telemetry.core.backends.chrome import inspector_backend
class MiscWebContentsBackend(object):
"""Provides acccess to chrome://oobe/login page, which is neither an extension
nor a tab."""
def __init__(self, browser_backend):
self._browser_backend = browser_backend
def GetOobe(self):
oobe_web_contents_info = self._FindWebContentsInfo()
if oobe_web_contents_info:
debugger_url = oobe_web_contents_info.get('webSocketDebuggerUrl')
if debugger_url:
inspector = self._CreateInspectorBackend(debugger_url)
return web_contents.WebContents(inspector)
return None
def _CreateInspectorBackend(self, debugger_url):
return inspector_backend.InspectorBackend(self._browser_backend.browser,
self._browser_backend,
debugger_url)
def _ListWebContents(self, timeout=None):
data = self._browser_backend.Request('', timeout=timeout)
return json.loads(data)
def _FindWebContentsInfo(self):
for web_contents_info in self._ListWebContents():
# Prior to crrev.com/203152, url was chrome://oobe/login.
if (web_contents_info.get('url').startswith('chrome://oobe')):
return web_contents_info
return None
| bsd-3-clause | 2,453,397,168,925,748,700 | 38.315789 | 80 | 0.685408 | false |
pbrod/numpy | numpy/distutils/fcompiler/absoft.py | 17 | 5499 |
# http://www.absoft.com/literature/osxuserguide.pdf
# http://www.absoft.com/documentation.html
# Notes:
# - when using -g77 then use -DUNDERSCORE_G77 to compile f2py
# generated extension modules (works for f2py v2.45.241_1936 and up)
import os
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from numpy.distutils.misc_util import cyg2win32
compilers = ['AbsoftFCompiler']
class AbsoftFCompiler(FCompiler):
compiler_type = 'absoft'
description = 'Absoft Corp Fortran Compiler'
#version_pattern = r'FORTRAN 77 Compiler (?P<version>[^\s*,]*).*?Absoft Corp'
version_pattern = r'(f90:.*?(Absoft Pro FORTRAN Version|FORTRAN 77 Compiler|Absoft Fortran Compiler Version|Copyright Absoft Corporation.*?Version))'+\
r' (?P<version>[^\s*,]*)(.*?Absoft Corp|)'
# on windows: f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-1998 mV2; Cray Research, Inc. 1994-1996 CF90 (2.x.x.x f36t87) Version 2.3 Wed Apr 19, 2006 13:05:16
# samt5735(8)$ f90 -V -c dummy.f
# f90: Copyright Absoft Corporation 1994-2002; Absoft Pro FORTRAN Version 8.0
# Note that fink installs g77 as f77, so need to use f90 for detection.
executables = {
'version_cmd' : None, # set by update_executables
'compiler_f77' : ["f77"],
'compiler_fix' : ["f90"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
if os.name=='nt':
library_switch = '/out:' #No space after /out:!
module_dir_switch = None
module_include_switch = '-p'
def update_executables(self):
f = cyg2win32(dummy_fortran_file())
self.executables['version_cmd'] = ['<F90>', '-V', '-c',
f+'.f', '-o', f+'.o']
def get_flags_linker_so(self):
if os.name=='nt':
opt = ['/dll']
# The "-K shared" switches are being left in for pre-9.0 versions
# of Absoft though I don't think versions earlier than 9 can
# actually be used to build shared libraries. In fact, version
# 8 of Absoft doesn't recognize "-K shared" and will fail.
elif self.get_version() >= '9.0':
opt = ['-shared']
else:
opt = ["-K", "shared"]
return opt
def library_dir_option(self, dir):
if os.name=='nt':
return ['-link', '/PATH:%s' % (dir)]
return "-L" + dir
def library_option(self, lib):
if os.name=='nt':
return '%s.lib' % (lib)
return "-l" + lib
def get_library_dirs(self):
opt = FCompiler.get_library_dirs(self)
d = os.environ.get('ABSOFT')
if d:
if self.get_version() >= '10.0':
# use shared libraries, the static libraries were not compiled -fPIC
prefix = 'sh'
else:
prefix = ''
if cpu.is_64bit():
suffix = '64'
else:
suffix = ''
opt.append(os.path.join(d, '%slib%s' % (prefix, suffix)))
return opt
def get_libraries(self):
opt = FCompiler.get_libraries(self)
if self.get_version() >= '11.0':
opt.extend(['af90math', 'afio', 'af77math', 'amisc'])
elif self.get_version() >= '10.0':
opt.extend(['af90math', 'afio', 'af77math', 'U77'])
elif self.get_version() >= '8.0':
opt.extend(['f90math', 'fio', 'f77math', 'U77'])
else:
opt.extend(['fio', 'f90math', 'fmath', 'U77'])
if os.name =='nt':
opt.append('COMDLG32')
return opt
def get_flags(self):
opt = FCompiler.get_flags(self)
if os.name != 'nt':
opt.extend(['-s'])
if self.get_version():
if self.get_version()>='8.2':
opt.append('-fpic')
return opt
def get_flags_f77(self):
opt = FCompiler.get_flags_f77(self)
opt.extend(['-N22', '-N90', '-N110'])
v = self.get_version()
if os.name == 'nt':
if v and v>='8.0':
opt.extend(['-f', '-N15'])
else:
opt.append('-f')
if v:
if v<='4.6':
opt.append('-B108')
else:
# Though -N15 is undocumented, it works with
# Absoft 8.0 on Linux
opt.append('-N15')
return opt
def get_flags_f90(self):
opt = FCompiler.get_flags_f90(self)
opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
"-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
if self.get_version():
if self.get_version()>'4.6':
opt.extend(["-YDEALLOC=ALL"])
return opt
def get_flags_fix(self):
opt = FCompiler.get_flags_fix(self)
opt.extend(["-YCFRL=1", "-YCOM_NAMES=LCS", "-YCOM_PFX", "-YEXT_PFX",
"-YCOM_SFX=_", "-YEXT_SFX=_", "-YEXT_NAMES=LCS"])
opt.extend(["-f", "fixed"])
return opt
def get_flags_opt(self):
opt = ['-O']
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='absoft').get_version())
| bsd-3-clause | -1,234,171,101,594,383,000 | 34.25 | 155 | 0.525186 | false |
eusi/MissionPlanerHM | Lib/site-packages/scipy/ndimage/_ni_support.py | 62 | 3326 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import types
import numpy
def _extend_mode_to_code(mode):
"""Convert an extension mode to the corresponding integer code.
"""
if mode == 'nearest':
return 0
elif mode == 'wrap':
return 1
elif mode == 'reflect':
return 2
elif mode == 'mirror':
return 3
elif mode == 'constant':
return 4
else:
raise RuntimeError('boundary mode not supported')
def _normalize_sequence(input, rank, array_type=None):
"""If input is a scalar, create a sequence of length equal to the
rank by duplicating the input. If input is a sequence,
check if its length is equal to the length of array.
"""
if (isinstance(input, (types.IntType, types.LongType,
types.FloatType))):
normalized = [input] * rank
else:
normalized = list(input)
if len(normalized) != rank:
err = "sequence argument must have length equal to input rank"
raise RuntimeError(err)
return normalized
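# Illustrative behaviour of _normalize_sequence (examples added for clarity,
# not part of the original module):
#   _normalize_sequence(3, 2)      -> [3, 3]
#   _normalize_sequence([1, 2], 2) -> [1, 2]
#   _normalize_sequence([1], 2)    -> RuntimeError (length must equal rank)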
def _get_output(output, input, shape=None):
if shape is None:
shape = input.shape
if output is None:
output = numpy.zeros(shape, dtype = input.dtype.name)
return_value = output
elif type(output) in [type(types.TypeType), type(numpy.zeros((4,)).dtype)]:
output = numpy.zeros(shape, dtype = output)
return_value = output
elif type(output) is types.StringType:
output = numpy.typeDict[output]
output = numpy.zeros(shape, dtype = output)
return_value = output
else:
if output.shape != shape:
raise RuntimeError("output shape not correct")
return_value = None
return output, return_value
def _check_axis(axis, rank):
if axis < 0:
axis += rank
if axis < 0 or axis >= rank:
raise ValueError('invalid axis')
return axis
| gpl-3.0 | -1,301,951,687,439,718,000 | 36.370787 | 79 | 0.679194 | false |
nikesh-mahalka/nova | nova/openstack/common/cliutils.py | 57 | 7940 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# W0603: Using the global statement
# W0621: Redefining name %s from outer scope
# pylint: disable=W0603,W0621
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
import six
from six import moves
from nova.openstack.common._i18n import _
class MissingArgs(Exception):
"""Supplied arguments are not sufficient for calling a function."""
def __init__(self, missing):
self.missing = missing
msg = _("Missing arguments: %s") % ", ".join(missing)
super(MissingArgs, self).__init__(msg)
def validate_args(fn, *args, **kwargs):
"""Check that the supplied args are sufficient for calling a function.
>>> validate_args(lambda a: None)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: a
>>> validate_args(lambda a, b, c, d: None, 0, c=1)
Traceback (most recent call last):
...
    MissingArgs: Missing arguments: b, d
:param fn: the function to check
    :param args: the positional arguments supplied
:param kwargs: the keyword arguments supplied
"""
argspec = inspect.getargspec(fn)
num_defaults = len(argspec.defaults or [])
required_args = argspec.args[:len(argspec.args) - num_defaults]
def isbound(method):
return getattr(method, '__self__', None) is not None
if isbound(fn):
required_args.pop(0)
missing = [arg for arg in required_args if arg not in kwargs]
missing = missing[len(args):]
if missing:
raise MissingArgs(missing)
def arg(*args, **kwargs):
"""Decorator for CLI args.
Example:
>>> @arg("name", help="Name of the new entity")
... def entity_create(args):
... pass
"""
def _decorator(func):
add_arg(func, *args, **kwargs)
return func
return _decorator
def env(*args, **kwargs):
"""Returns the first environment variable set.
If all are empty, defaults to '' or keyword arg `default`.
"""
for arg in args:
value = os.environ.get(arg)
if value:
return value
return kwargs.get('default', '')
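# Illustrative use (the variable names below are hypothetical):
#   env('OS_USERNAME', 'NOVA_USERNAME')                  # first one set wins
#   env('OS_USERNAME', 'NOVA_USERNAME', default='demo')  # neither set -> 'demo'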
def add_arg(func, *args, **kwargs):
"""Bind CLI arguments to a shell.py `do_foo` function."""
if not hasattr(func, 'arguments'):
func.arguments = []
# NOTE(sirp): avoid dups that can occur when the module is shared across
# tests.
if (args, kwargs) not in func.arguments:
# Because of the semantics of decorator composition if we just append
# to the options list positional options will appear to be backwards.
func.arguments.insert(0, (args, kwargs))
def unauthenticated(func):
"""Adds 'unauthenticated' attribute to decorated function.
Usage:
>>> @unauthenticated
... def mymethod(f):
... pass
"""
func.unauthenticated = True
return func
def isunauthenticated(func):
"""Checks if the function does not require authentication.
Mark such functions with the `@unauthenticated` decorator.
:returns: bool
"""
return getattr(func, 'unauthenticated', False)
def print_list(objs, fields, formatters=None, sortby_index=0,
mixed_case_fields=None, field_labels=None):
"""Print a list or objects as a table, one row per object.
:param objs: iterable of :class:`Resource`
:param fields: attributes that correspond to columns, in order
:param formatters: `dict` of callables for field formatting
:param sortby_index: index of the field for sorting table rows
:param mixed_case_fields: fields corresponding to object attributes that
have mixed case names (e.g., 'serverId')
:param field_labels: Labels to use in the heading of the table, default to
fields.
"""
formatters = formatters or {}
mixed_case_fields = mixed_case_fields or []
field_labels = field_labels or fields
if len(field_labels) != len(fields):
raise ValueError(_("Field labels list %(labels)s has different number "
"of elements than fields list %(fields)s"),
{'labels': field_labels, 'fields': fields})
if sortby_index is None:
kwargs = {}
else:
kwargs = {'sortby': field_labels[sortby_index]}
pt = prettytable.PrettyTable(field_labels)
pt.align = 'l'
for o in objs:
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](o))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
data = getattr(o, field_name, '')
row.append(data)
pt.add_row(row)
if six.PY3:
print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
else:
print(encodeutils.safe_encode(pt.get_string(**kwargs)))
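# A minimal sketch of a typical print_list call (the Server objects are
# hypothetical):
#   print_list(servers, ['ID', 'Name'])
# Each heading is mapped to an attribute by lowercasing and replacing spaces
# with underscores ('ID' -> o.id), unless the field appears in
# mixed_case_fields or has an entry in formatters.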
def print_dict(dct, dict_property="Property", wrap=0):
"""Print a `dict` as a table of two columns.
:param dct: `dict` to print
:param dict_property: name of the first column
:param wrap: wrapping for the second column
"""
pt = prettytable.PrettyTable([dict_property, 'Value'])
pt.align = 'l'
for k, v in six.iteritems(dct):
# convert dict to str to check length
if isinstance(v, dict):
v = six.text_type(v)
if wrap > 0:
v = textwrap.fill(six.text_type(v), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if v and isinstance(v, six.string_types) and r'\n' in v:
lines = v.strip().split(r'\n')
col1 = k
for line in lines:
pt.add_row([col1, line])
col1 = ''
else:
pt.add_row([k, v])
if six.PY3:
print(encodeutils.safe_encode(pt.get_string()).decode())
else:
print(encodeutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
"""Read password from TTY."""
verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
pw = None
if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
# Check for Ctrl-D
try:
for __ in moves.range(max_password_prompts):
pw1 = getpass.getpass("OS Password: ")
if verify:
pw2 = getpass.getpass("Please verify: ")
else:
pw2 = pw1
if pw1 == pw2 and pw1:
pw = pw1
break
except EOFError:
pass
return pw
def service_type(stype):
"""Adds 'service_type' attribute to decorated function.
Usage:
.. code-block:: python
@service_type('volume')
def mymethod(f):
...
"""
def inner(f):
f.service_type = stype
return f
return inner
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None)
def pretty_choice_list(l):
return ', '.join("'%s'" % i for i in l)
def exit(msg=''):
if msg:
        print(msg, file=sys.stderr)
sys.exit(1)
| apache-2.0 | 6,761,229,807,877,314,000 | 28.298893 | 79 | 0.606297 | false |
mitocw/edx-platform | openedx/features/enterprise_support/tests/test_logout.py | 3 | 2724 | """
Tests for logout for enterprise flow
"""
import ddt
import mock
from django.test.utils import override_settings
from django.urls import reverse
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from openedx.features.enterprise_support.api import enterprise_enabled
from openedx.features.enterprise_support.tests import (
FAKE_ENTERPRISE_CUSTOMER,
FEATURES_WITH_ENTERPRISE_ENABLED,
factories
)
from openedx.features.enterprise_support.tests.mixins.enterprise import EnterpriseServiceMockMixin
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
@ddt.ddt
@override_settings(FEATURES=FEATURES_WITH_ENTERPRISE_ENABLED)
@skip_unless_lms
class EnterpriseLogoutTests(EnterpriseServiceMockMixin, CacheIsolationTestCase, UrlResetMixin):
""" Tests for the enterprise logout functionality. """
def setUp(self):
super(EnterpriseLogoutTests, self).setUp()
self.user = UserFactory()
self.enterprise_customer = FAKE_ENTERPRISE_CUSTOMER
self.enterprise_learner = factories.EnterpriseCustomerUserFactory(user_id=self.user.id)
self.client.login(username=self.user.username, password='test')
patcher = mock.patch('openedx.features.enterprise_support.api.enterprise_customer_from_api')
self.mock_enterprise_customer_from_api = patcher.start()
self.mock_enterprise_customer_from_api.return_value = self.enterprise_customer
self.addCleanup(patcher.stop)
@ddt.data(
('https%3A%2F%2Ftest.edx.org%2Fcourses', False),
('/courses/course-v1:ARTS+D1+2018_T/course/', False),
('invalid-url', False),
('/enterprise/c5dad9a7-741c-4841-868f-850aca3ff848/course/Microsoft+DAT206x/enroll/', True),
('%2Fenterprise%2Fc5dad9a7-741c-4841-868f-850aca3ff848%2Fcourse%2FMicrosoft%2BDAT206x%2Fenroll%2F', True),
('/enterprise/handle_consent_enrollment/efd91463-dc40-4882-aeb9-38202131e7b2/course', True),
('%2Fenterprise%2Fhandle_consent_enrollment%2Fefd91463-dc40-4882-aeb9-38202131e7b2%2Fcourse', True),
)
@ddt.unpack
def test_logout_enterprise_target(self, redirect_url, enterprise_target):
url = '{logout_path}?redirect_url={redirect_url}'.format(
logout_path=reverse('logout'),
redirect_url=redirect_url
)
self.assertTrue(enterprise_enabled())
response = self.client.get(url, HTTP_HOST='testserver')
expected = {
'enterprise_target': enterprise_target,
}
self.assertDictContainsSubset(expected, response.context_data)
if enterprise_target:
self.assertContains(response, 'We are signing you in.')
| agpl-3.0 | 5,656,543,502,723,711,000 | 40.272727 | 114 | 0.72467 | false |
smathot/PyGaze | opensesame_plugins/pygaze_drift_correct/pygaze_drift_correct.py | 3 | 3469 | #-*- coding:utf-8 -*-
"""
This file is part of PyGaze.
PyGaze is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyGaze is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyGaze. If not, see <http://www.gnu.org/licenses/>.
"""
import inspect
from openexp.canvas import canvas
from libopensesame.item import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from pygaze.display import Display
class pygaze_drift_correct(item):
"""Plug-in runtime definition."""
description = u'Perform eye-tracker drift correction'
def reset(self):
"""
desc:
Resets plug-in settings.
"""
self.var.xpos = 0
self.var.ypos = 0
self.var.fixation_triggered = u'no'
self.var.target_color = u'[foreground]'
self.var.target_style = u'default'
self.var.draw_target = u'yes'
def prepare_drift_correction_canvas(self):
"""A hook to prepare the canvas with the drift-correction target."""
if self.var.draw_target == u'yes':
self.dc_canvas = canvas(self.experiment)
self.dc_canvas.fixdot(self.var.xpos, self.var.ypos,
color=self.var.target_color, style=self.var.target_style)
else:
self.dc_canvas = None
def draw_drift_correction_canvas(self, x, y):
"""
A hook to show the canvas with the drift-correction target.
Arguments:
x -- The X coordinate (unused).
y -- The Y coordinate (unused).
"""
if self.dc_canvas is not None:
self.dc_canvas.show()
def prepare(self):
"""The preparation phase of the plug-in goes here."""
item.prepare(self)
self.prepare_drift_correction_canvas()
self.experiment.pygaze_eyetracker.set_draw_drift_correction_target_func(
self.draw_drift_correction_canvas)
def run(self):
"""The run phase of the plug-in goes here."""
self.set_item_onset()
if self.var.uniform_coordinates == u'yes':
xpos = self.var.width / 2 + self.var.xpos
ypos = self.var.height / 2 + self.var.ypos
else:
xpos = self.var.xpos
ypos = self.var.ypos
while True:
success = self.experiment.pygaze_eyetracker.drift_correction(
pos=(xpos, ypos),
fix_triggered=self.var.fixation_triggered==u'yes')
if success:
break
class qtpygaze_drift_correct(pygaze_drift_correct, qtautoplugin):
"""Plug-in GUI definition."""
def __init__(self, name, experiment, script=None):
"""
Constructor.
Arguments:
name -- The name of the plug-in.
experiment -- The experiment object.
Keyword arguments:
script -- A definition script. (default=None)
"""
pygaze_drift_correct.__init__(self, name, experiment, script)
qtautoplugin.__init__(self, __file__)
self.custom_interactions()
def apply_edit_changes(self):
"""Apply the controls"""
if not qtautoplugin.apply_edit_changes(self) or self.lock:
return False
self.custom_interactions()
def custom_interactions(self):
"""
		Disables the target-style combobox and the target-color field if no
		target should be drawn.
"""
draw_target = self.var.draw_target == u'yes'
self.combobox_target_style.setEnabled(draw_target)
self.line_edit_target_color.setEnabled(draw_target)
| gpl-3.0 | -544,204,679,658,063,700 | 24.88806 | 74 | 0.712886 | false |
LeonChambers/bwhglass-server | lib/werkzeug/utils.py | 317 | 22676 | # -*- coding: utf-8 -*-
"""
werkzeug.utils
~~~~~~~~~~~~~~
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
import sys
import pkgutil
try:
from html.entities import name2codepoint
except ImportError:
from htmlentitydefs import name2codepoint
from werkzeug._compat import unichr, text_type, string_types, iteritems, \
reraise, PY2
from werkzeug._internal import _DictAccessorProperty, \
_parse_signature, _missing
_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
_entity_re = re.compile(r'&([^;]+);')
_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
'LPT2', 'LPT3', 'PRN', 'NUL')
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is
# no entry with the same name in the instance's __dict__.
# this allows us to completely get rid of the access function call
    # overhead. If one chooses to invoke __get__ by hand the property
# will still work as expected because the lookup logic is replicated
# in __get__ for manual invocation.
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
class environ_property(_DictAccessorProperty):
"""Maps request attributes to environment variables. This works not only
    for the Werkzeug request object, but also any other class with an
environ attribute:
>>> class Test(object):
... environ = {'key': 'value'}
... test = environ_property('key')
>>> var = Test()
>>> var.test
'value'
If you pass it a second value it's used as default if the key does not
exist, the third one can be a converter that takes a value and converts
it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value
is used. If no default value is provided `None` is used.
Per default the property is read only. You have to explicitly enable it
by passing ``read_only=False`` to the constructor.
"""
read_only = True
def lookup(self, obj):
return obj.environ
class header_property(_DictAccessorProperty):
"""Like `environ_property` but for headers."""
def lookup(self, obj):
return obj.headers
class HTMLBuilder(object):
"""Helper object for HTML generation.
Per default there are two instances of that class. The `html` one, and
the `xhtml` one for those two dialects. The class uses keyword parameters
and positional parameters to generate small snippets of HTML.
Keyword parameters are converted to XML/SGML attributes, positional
arguments are used as children. Because Python accepts positional
arguments before keyword arguments it's a good idea to use a list with the
star-syntax for some children:
>>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
... html.a('bar', href='bar.html')])
u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
This class works around some browser limitations and can not be used for
arbitrary SGML/XML generation. For that purpose lxml and similar
libraries exist.
Calling the builder escapes the string passed:
>>> html.p(html("<foo>"))
u'<p><foo></p>'
"""
_entity_re = re.compile(r'&([^;]+);')
_entities = name2codepoint.copy()
_entities['apos'] = 39
_empty_elements = set([
'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame',
'hr', 'img', 'input', 'keygen', 'isindex', 'link', 'meta', 'param',
'source', 'wbr'
])
_boolean_attributes = set([
'selected', 'checked', 'compact', 'declare', 'defer', 'disabled',
'ismap', 'multiple', 'nohref', 'noresize', 'noshade', 'nowrap'
])
_plaintext_elements = set(['textarea'])
_c_like_cdata = set(['script', 'style'])
def __init__(self, dialect):
self._dialect = dialect
def __call__(self, s):
return escape(s)
def __getattr__(self, tag):
if tag[:2] == '__':
raise AttributeError(tag)
def proxy(*children, **arguments):
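            # Serialize keyword arguments as attributes: a trailing
            # underscore is stripped so reserved words can be passed
            # (class_ -> class), and boolean attributes are emitted per
            # dialect (HTML: bare name, XHTML: name="name").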
buffer = '<' + tag
for key, value in iteritems(arguments):
if value is None:
continue
if key[-1] == '_':
key = key[:-1]
if key in self._boolean_attributes:
if not value:
continue
if self._dialect == 'xhtml':
value = '="' + key + '"'
else:
value = ''
else:
value = '="' + escape(value) + '"'
buffer += ' ' + key + value
if not children and tag in self._empty_elements:
if self._dialect == 'xhtml':
buffer += ' />'
else:
buffer += '>'
return buffer
buffer += '>'
children_as_string = ''.join([text_type(x) for x in children
if x is not None])
if children_as_string:
if tag in self._plaintext_elements:
children_as_string = escape(children_as_string)
elif tag in self._c_like_cdata and self._dialect == 'xhtml':
children_as_string = '/*<![CDATA[*/' + \
children_as_string + '/*]]>*/'
buffer += children_as_string + '</' + tag + '>'
return buffer
return proxy
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__,
self._dialect
)
html = HTMLBuilder('html')
xhtml = HTMLBuilder('xhtml')
def get_content_type(mimetype, charset):
"""Return the full content type string with charset for a mimetype.
If the mimetype represents text the charset will be appended as charset
parameter, otherwise the mimetype is returned unchanged.
:param mimetype: the mimetype to be used as content type.
:param charset: the charset to be appended in case it was a text mimetype.
:return: the content type.
"""
if mimetype.startswith('text/') or \
mimetype == 'application/xml' or \
(mimetype.startswith('application/') and
mimetype.endswith('+xml')):
mimetype += '; charset=' + charset
return mimetype
def format_string(string, context):
"""String-template format a string:
>>> format_string('$foo and ${foo}s', dict(foo=42))
'42 and 42s'
This does not do any attribute lookup etc. For more advanced string
    formatting have a look at the `werkzeug.template` module.
:param string: the format string.
:param context: a dict with the variables to insert.
"""
def lookup_arg(match):
x = context[match.group(1) or match.group(2)]
if not isinstance(x, string_types):
x = type(string)(x)
return x
return _format_re.sub(lookup_arg, string)
def secure_filename(filename):
r"""Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows system the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you generate random
filename if the function returned an empty one.
.. versionadded:: 0.5
:param filename: the filename to secure
"""
if isinstance(filename, text_type):
from unicodedata import normalize
filename = normalize('NFKD', filename).encode('ascii', 'ignore')
if not PY2:
filename = filename.decode('ascii')
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, ' ')
filename = str(_filename_ascii_strip_re.sub('', '_'.join(
filename.split()))).strip('._')
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if os.name == 'nt' and filename and \
filename.split('.')[0].upper() in _windows_device_files:
filename = '_' + filename
return filename
def escape(s, quote=None):
"""Replace special characters "&", "<", ">" and (") to HTML-safe sequences.
There is a special handling for `None` which escapes to an empty string.
.. versionchanged:: 0.9
`quote` is now implicitly on.
:param s: the string to escape.
:param quote: ignored.
"""
if s is None:
return ''
elif hasattr(s, '__html__'):
return text_type(s.__html__())
elif not isinstance(s, string_types):
s = text_type(s)
if quote is not None:
from warnings import warn
warn(DeprecationWarning('quote parameter is implicit now'), stacklevel=2)
s = s.replace('&', '&').replace('<', '<') \
.replace('>', '>').replace('"', """)
return s
def unescape(s):
"""The reverse function of `escape`. This unescapes all the HTML
entities, not only the XML entities inserted by `escape`.
:param s: the string to unescape.
"""
def handle_match(m):
name = m.group(1)
if name in HTMLBuilder._entities:
return unichr(HTMLBuilder._entities[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, s)
def redirect(location, code=302):
"""Return a response object (a WSGI application) that, if called,
redirects the client to the target location. Supported codes are 301,
302, 303, 305, and 307. 300 is not supported because it's not a real
    redirect and 304 because it's the answer for a request with defined
    If-Modified-Since headers.
.. versionadded:: 0.6
The location can now be a unicode string that is encoded using
the :func:`iri_to_uri` function.
:param location: the location the response should redirect to.
:param code: the redirect status code. defaults to 302.
"""
from werkzeug.wrappers import Response
display_location = escape(location)
if isinstance(location, text_type):
from werkzeug.urls import iri_to_uri
location = iri_to_uri(location)
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(escape(location), display_location), code, mimetype='text/html')
response.headers['Location'] = location
return response
def append_slash_redirect(environ, code=301):
"""Redirect to the same URL but with a slash appended. The behavior
of this function is undefined if the path ends with a slash already.
:param environ: the WSGI environment for the request that triggers
the redirect.
:param code: the status code for the redirect.
"""
new_path = environ['PATH_INFO'].strip('/') + '/'
query_string = environ.get('QUERY_STRING')
if query_string:
new_path += '?' + query_string
return redirect(new_path, code)
def import_string(import_name, silent=False):
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If `silent` is True the return value will be `None` if the import fails.
:param import_name: the dotted name for the object to import.
:param silent: if set to `True` import errors are ignored and
`None` is returned instead.
:return: imported object
"""
#XXX: py3 review needed
assert isinstance(import_name, string_types)
# force the import name to automatically convert to strings
import_name = str(import_name)
try:
if ':' in import_name:
module, obj = import_name.split(':', 1)
elif '.' in import_name:
module, obj = import_name.rsplit('.', 1)
else:
return __import__(import_name)
# __import__ is not able to handle unicode strings in the fromlist
# if the module is a package
if PY2 and isinstance(obj, unicode):
obj = obj.encode('utf-8')
try:
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
# support importing modules not yet set up by the parent module
# (or package for that matter)
modname = module + '.' + obj
__import__(modname)
return sys.modules[modname]
except ImportError as e:
if not silent:
reraise(
ImportStringError,
ImportStringError(import_name, e),
sys.exc_info()[2])
def find_modules(import_path, include_packages=False, recursive=False):
"""Find all the modules below a package. This can be useful to
automatically import all views / controllers so that their metaclasses /
function decorators have a chance to register themselves on the
application.
Packages are not returned unless `include_packages` is `True`. This can
also recursively list modules but in that case it will import all the
packages to get the correct load path of that module.
:param import_name: the dotted name for the package to find child modules.
:param include_packages: set to `True` if packages should be returned, too.
:param recursive: set to `True` if recursion should happen.
:return: generator
"""
module = import_string(import_path)
path = getattr(module, '__path__', None)
if path is None:
raise ValueError('%r is not a package' % import_path)
basename = module.__name__ + '.'
for importer, modname, ispkg in pkgutil.iter_modules(path):
modname = basename + modname
if ispkg:
if include_packages:
yield modname
if recursive:
for item in find_modules(modname, include_packages, True):
yield item
else:
yield modname
def validate_arguments(func, args, kwargs, drop_extra=True):
"""Check if the function accepts the arguments and keyword arguments.
Returns a new ``(args, kwargs)`` tuple that can safely be passed to
the function without causing a `TypeError` because the function signature
is incompatible. If `drop_extra` is set to `True` (which is the default)
any extra positional or keyword arguments are dropped automatically.
The exception raised provides three attributes:
`missing`
A set of argument names that the function expected but where
missing.
`extra`
A dict of keyword arguments that the function can not handle but
where provided.
`extra_positional`
A list of values that where given by positional argument but the
function cannot accept.
This can be useful for decorators that forward user submitted data to
a view function::
from werkzeug.utils import ArgumentValidationError, validate_arguments
def sanitize(f):
def proxy(request):
data = request.values.to_dict()
try:
args, kwargs = validate_arguments(f, (request,), data)
except ArgumentValidationError:
raise BadRequest('The browser failed to transmit all '
'the data expected.')
return f(*args, **kwargs)
return proxy
:param func: the function the validation is performed against.
:param args: a tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:param drop_extra: set to `False` if you don't want extra arguments
to be silently dropped.
:return: tuple in the form ``(args, kwargs)``.
"""
parser = _parse_signature(func)
args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
if missing:
raise ArgumentValidationError(tuple(missing))
elif (extra or extra_positional) and not drop_extra:
raise ArgumentValidationError(None, extra, extra_positional)
return tuple(args), kwargs
def bind_arguments(func, args, kwargs):
"""Bind the arguments provided into a dict. When passed a function,
a tuple of arguments and a dict of keyword arguments `bind_arguments`
returns a dict of names as the function would see it. This can be useful
to implement a cache decorator that uses the function arguments to build
the cache key based on the values of the arguments.
:param func: the function the arguments should be bound for.
:param args: tuple of positional arguments.
:param kwargs: a dict of keyword arguments.
:return: a :class:`dict` of bound keyword arguments.
"""
args, kwargs, missing, extra, extra_positional, \
arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
values = {}
for (name, has_default, default), value in zip(arg_spec, args):
values[name] = value
if vararg_var is not None:
values[vararg_var] = tuple(extra_positional)
elif extra_positional:
raise TypeError('too many positional arguments')
if kwarg_var is not None:
multikw = set(extra) & set([x[0] for x in arg_spec])
if multikw:
raise TypeError('got multiple values for keyword argument ' +
repr(next(iter(multikw))))
values[kwarg_var] = extra
elif extra:
raise TypeError('got unexpected keyword argument ' +
repr(next(iter(extra))))
return values
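# Illustrative behaviour (hypothetical function; assumes the signature parser
# fills in defaults, as validate_arguments relies on):
#   def f(a, b=2, *args, **kwargs): pass
#   bind_arguments(f, (1,), {'c': 3})
#   -> {'a': 1, 'b': 2, 'args': (), 'kwargs': {'c': 3}}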
class ArgumentValidationError(ValueError):
"""Raised if :func:`validate_arguments` fails to validate"""
def __init__(self, missing=None, extra=None, extra_positional=None):
self.missing = set(missing or ())
self.extra = extra or {}
self.extra_positional = extra_positional or []
ValueError.__init__(self, 'function arguments invalid. ('
'%d missing, %d additional)' % (
len(self.missing),
len(self.extra) + len(self.extra_positional)
))
class ImportStringError(ImportError):
"""Provides information about a failed :func:`import_string` attempt."""
#: String in dotted notation that failed to be imported.
import_name = None
#: Wrapped exception.
exception = None
def __init__(self, import_name, exception):
self.import_name = import_name
self.exception = exception
msg = (
'import_string() failed for %r. Possible reasons are:\n\n'
'- missing __init__.py in a package;\n'
'- package or module path not included in sys.path;\n'
'- duplicated package or module name taking precedence in '
'sys.path;\n'
'- missing module, class, function or variable;\n\n'
'Debugged import:\n\n%s\n\n'
'Original exception:\n\n%s: %s')
name = ''
tracked = []
for part in import_name.replace(':', '.').split('.'):
name += (name and '.') + part
imported = import_string(name, silent=True)
if imported:
tracked.append((name, getattr(imported, '__file__', None)))
else:
track = ['- %r found in %r.' % (n, i) for n, i in tracked]
track.append('- %r not found.' % name)
msg = msg % (import_name, '\n'.join(track),
exception.__class__.__name__, str(exception))
break
ImportError.__init__(self, msg)
def __repr__(self):
return '<%s(%r, %r)>' % (self.__class__.__name__, self.import_name,
self.exception)
# circular dependencies
from werkzeug.http import quote_header_value, unquote_header_value, \
cookie_date
# DEPRECATED
# these objects were previously in this module as well. we import
# them here for backwards compatibility with old pickles.
from werkzeug.datastructures import MultiDict, CombinedMultiDict, \
Headers, EnvironHeaders
from werkzeug.http import parse_cookie, dump_cookie
| apache-2.0 | 7,245,585,580,171,192,000 | 36.11293 | 83 | 0.606765 | false |
sumedhasingla/VTK | Filters/General/Testing/Python/warplens.py | 20 | 1517 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# load in the texture map
#
pngReader = vtk.vtkPNGReader()
pngReader.SetFileName(VTK_DATA_ROOT + "/Data/camscene.png")
pngReader.Update()
xWidth = pngReader.GetOutput().GetDimensions()[0]
yHeight = pngReader.GetOutput().GetDimensions()[1]
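# Configure the lens-distortion warp: K1/K2 are radial and P1/P2 tangential
# distortion coefficients (a Brown-Conrady style lens model); the principal
# point and format width/height describe the camera, presumably taken from a
# calibration of the camera that shot camscene.png.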
wl = vtk.vtkWarpLens()
wl.SetInputConnection(pngReader.GetOutputPort())
wl.SetPrincipalPoint(2.4507, 1.7733)
wl.SetFormatWidth(4.792)
wl.SetFormatHeight(3.6)
wl.SetImageWidth(xWidth)
wl.SetImageHeight(yHeight)
wl.SetK1(0.01307)
wl.SetK2(0.0003102)
wl.SetP1(1.953e-005)
wl.SetP2(-9.655e-005)
gf = vtk.vtkGeometryFilter()
gf.SetInputConnection(wl.GetOutputPort())
tf = vtk.vtkTriangleFilter()
tf.SetInputConnection(gf.GetOutputPort())
strip = vtk.vtkStripper()
strip.SetInputConnection(tf.GetOutputPort())
strip.SetMaximumLength(250)
dsm = vtk.vtkPolyDataMapper()
dsm.SetInputConnection(strip.GetOutputPort())
planeActor = vtk.vtkActor()
planeActor.SetMapper(dsm)
# Add the actors to the renderer, set the background and size
ren1.AddActor(planeActor)
ren1.SetBackground(0.1, 0.2, 0.4)
renWin.SetSize(300, 300)
# render the image
iren.Initialize()
renWin.Render()
ren1.GetActiveCamera().Zoom(1.4)
renWin.Render()
#iren.Start()
| bsd-3-clause | -7,738,393,044,938,293,000 | 21.984848 | 61 | 0.775873 | false |