repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---|
duboviy/pybenchmark | pybenchmark/gprofiler.py | 1 | 3896 | """
Module for visualizing Python code profiles using the Chrome developer tools.
Example usage:
>>> profiler = GProfiler()
>>> profiler.start()
>>> my_expensive_code()
>>> profiler.stop()
>>> with open('my.cpuprofile', 'w') as f:
... f.write(profiler.output())
In a gevented environment, context switches can make things confusing. Data
collection can be limited to a single greenlet by passing the target greenlet:
>>> profiler = GProfiler(target_greenlet = gevent.getcurrent())
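The profiler also works as a context manager; on exit it writes the profile
to './pybenchmark_<pid>_.cpuprofile' (a sketch of the __enter__/__exit__
behaviour defined below):
>>> with GProfiler() as profiler:
...     my_expensive_code()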
"""
import os
import sys
import json
import timeit
try:
import gevent
except ImportError:
gevent = None
class Node(object):
def __init__(self, name, id_):
self.name = name
self.id_ = id_
self.children = {}
self.hitCount = 1
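# serialize() below emits the node shape Chrome DevTools expects in a
# .cpuprofile (functionName, hitCount, children, id, callUID, ...)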
def serialize(self):
res = {
'functionName': self.name,
'hitCount': self.hitCount,
'children': [c.serialize() for c in self.children.values()],
'scriptId': '1',
'url': '',
'lineNumber': 1,
'columnNumber': 1,
'deoptReason': '',
'id': self.id_,
'callUID': self.id_
}
return res
def add(self, frames, id_gen):
if not frames:
self.hitCount += 1
return self.id_
head = frames[0]
child = self.children.get(head)
if child is None:
child = Node(name=head, id_=id_gen())
self.children[head] = child
return child.add(frames[1:], id_gen)
class GProfiler(object):
def __init__(self, target_greenlet=None, interval=0.0001):
self.target_greenlet_id = (
id(target_greenlet) if target_greenlet else None)
self.interval = interval
self.started = None
self.last_profile = None
self.root = Node('head', 1)
self.nextId = 1
self.samples = []
self.timestamps = []
def _id_generator(self):
self.nextId += 1
return self.nextId
def _profile(self, frame, event, arg): # pylint: disable=W0613
if event == 'call':
self._record_frame(frame.f_back)
def _record_frame(self, frame):
if self.target_greenlet_id and id(gevent.getcurrent()) != self.target_greenlet_id:
return
now = timeit.default_timer()
if self.last_profile is not None:
if now - self.last_profile < self.interval:
return
self.last_profile = now
self.timestamps.append(int(1e6 * now))
stack = []
while frame is not None:
stack.append(self._format_frame(frame))
frame = frame.f_back
stack.reverse()
leaf_id = self.root.add(stack, self._id_generator)
self.samples.append(leaf_id)  # record the id of the sampled leaf node
@staticmethod
def _format_frame(frame):
return '{}({})'.format(frame.f_code.co_name,
frame.f_globals.get('__name__'))
def output(self):
if self.samples:
data = {
'startTime': self.started,
'endTime': 0.000001 * self.timestamps[-1],
'timestamps': self.timestamps,
'samples': self.samples,
'head': self.root.serialize()
}
else:
data = {}
return json.dumps(data)
def start(self):
sys.setprofile(self._profile)
self.started = timeit.default_timer()
print("# Running in profile mode. #")
@staticmethod
def stop():
sys.setprofile(None)
print("# Profile mode stopped. #")
def __enter__(self):
self.start()
return self
def __exit__(self, type_, value, traceback_):
self.stop()
filename = './pybenchmark_%s_.cpuprofile' % os.getpid()
with open(filename, 'w') as f:
f.write(self.output())
print(("Written profile file '%s'." % (filename)))
| mit |
sorenk/ansible | test/sanity/import/importer.py | 29 | 6122 | #!/usr/bin/env python
"""Import the given python module(s) and report error(s) encountered."""
from __future__ import absolute_import, print_function
import contextlib
import imp
import os
import re
import sys
import traceback
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import ansible.module_utils.basic
import ansible.module_utils.common.removed
class ImporterAnsibleModuleException(Exception):
"""Exception thrown during initialization of ImporterAnsibleModule."""
pass
class ImporterAnsibleModule(object):
"""Replacement for AnsibleModule to support import testing."""
def __init__(self, *args, **kwargs):
raise ImporterAnsibleModuleException()
# stop Ansible module execution during AnsibleModule instantiation
ansible.module_utils.basic.AnsibleModule = ImporterAnsibleModule
# no-op for _load_params since it may be called before instantiating AnsibleModule
ansible.module_utils.basic._load_params = lambda *args, **kwargs: {}
# no-op for removed_module since it is called in place of AnsibleModule instantiation
ansible.module_utils.common.removed.removed_module = lambda *args, **kwargs: None
def main():
"""Main program function."""
base_dir = os.getcwd()
messages = set()
for path in sys.argv[1:] or sys.stdin.read().splitlines():
test_python_module(path, base_dir, messages, False)
test_python_module(path, base_dir, messages, True)
if messages:
exit(10)
def test_python_module(path, base_dir, messages, ansible_module):
if ansible_module:
# importing modules with __main__ under Python 2.6 exits with status code 1
if sys.version_info < (2, 7):
return
# only run __main__ protected code for Ansible modules
if not path.startswith('lib/ansible/modules/'):
return
# async_wrapper is not an Ansible module
if path == 'lib/ansible/modules/utilities/logic/async_wrapper.py':
return
# run code protected by __name__ conditional
name = '__main__'
# show the Ansible module responsible for the exception, even if it was thrown in module_utils
filter_dir = os.path.join(base_dir, 'lib/ansible/modules')
else:
# do not run code protected by __name__ conditional
name = 'module_import_test'
# show the Ansible file responsible for the exception, even if it was thrown in 3rd party code
filter_dir = base_dir
capture = Capture()
try:
with open(path, 'r') as module_fd:
with capture_output(capture):
imp.load_module(name, module_fd, os.path.abspath(path), ('.py', 'r', imp.PY_SOURCE))
capture_report(path, capture, messages)
except ImporterAnsibleModuleException:
# module instantiated AnsibleModule without raising an exception
pass
except BaseException as ex: # pylint: disable=locally-disabled, broad-except
capture_report(path, capture, messages)
exc_type, _, exc_tb = sys.exc_info()
message = str(ex)
results = list(reversed(traceback.extract_tb(exc_tb)))
source = None
line = 0
offset = 0
for result in results:
if result[0].startswith(filter_dir):
source = result[0][len(base_dir) + 1:].replace('test/sanity/import/', '')
line = result[1] or 0
break
if not source:
# If none of our source files are found in the traceback, report the file we were testing.
# I haven't been able to come up with a test case that encounters this issue yet.
source = path
message += ' (in %s:%d)' % (results[-1][0], results[-1][1] or 0)
elif isinstance(ex, SyntaxError):
if ex.filename.endswith(path): # pylint: disable=locally-disabled, no-member
# A SyntaxError in the source we're importing will have the correct path, line and offset.
# However, the traceback will report the path to this importer.py script instead.
# We'll use the details from the SyntaxError in this case, as it's more accurate.
source = path
line = ex.lineno or 0 # pylint: disable=locally-disabled, no-member
offset = ex.offset or 0 # pylint: disable=locally-disabled, no-member
message = str(ex)
# Hack to remove the filename and line number from the message, if present.
message = message.replace(' (%s, line %d)' % (os.path.basename(path), line), '')
message = re.sub(r'\n *', ': ', message)
error = '%s:%d:%d: %s: %s' % (source, line, offset, exc_type.__name__, message)
report_message(error, messages)
class Capture(object):
"""Captured output and/or exception."""
def __init__(self):
self.stdout = StringIO()
self.stderr = StringIO()
def capture_report(path, capture, messages):
"""Report on captured output.
:type path: str
:type capture: Capture
:type messages: set[str]
"""
if capture.stdout.getvalue():
message = '%s:%d:%d: %s: %s' % (path, 0, 0, 'Output', 'Import resulted in output to stdout.')
report_message(message, messages)
if capture.stderr.getvalue():
message = '%s:%d:%d: %s: %s' % (path, 0, 0, 'Output', 'Import resulted in output to stderr.')
report_message(message, messages)
def report_message(message, messages):
"""Report message if not already reported.
:type message: str
:type messages: set[str]
"""
if message not in messages:
messages.add(message)
print(message)
@contextlib.contextmanager
def capture_output(capture):
"""Capture sys.stdout and sys.stderr.
:type capture: Capture
"""
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = capture.stdout
sys.stderr = capture.stderr
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
if __name__ == '__main__':
main()
| gpl-3.0 |
teeple/pns_server | work/install/Python-2.7.4/Lib/pyclbr.py | 139 | 13388 | """Parse a Python module and describe its classes and methods.
Parse enough of a Python file to recognize imports and class and
method definitions, and to find out the superclasses of a class.
The interface consists of a single function:
readmodule_ex(module [, path])
where module is the name of a Python module, and path is an optional
list of directories where the module is to be searched. If present,
path is prepended to the system search path sys.path. The return
value is a dictionary. The keys of the dictionary are the names of
the classes defined in the module (including classes that are defined
via the from XXX import YYY construct). The values are class
instances of the class Class defined here. One special key/value pair
is present for packages: the key '__path__' has a list as its value
which contains the package search path.
A class is described by the class Class in this module. Instances
of this class have the following instance variables:
module -- the module name
name -- the name of the class
super -- a list of super classes (Class instances)
methods -- a dictionary of methods
file -- the file in which the class was defined
lineno -- the line in the file on which the class statement occurred
The dictionary of methods uses the method names as keys and the line
numbers on which the method was defined as values.
If the name of a super class is not recognized, the corresponding
entry in the list of super classes is not a class instance but a
string giving the name of the super class. Since import statements
are recognized and imported modules are scanned as well, this
shouldn't happen often.
A function is described by the class Function in this module.
Instances of this class have the following instance variables:
module -- the module name
name -- the name of the function
file -- the file in which the function was defined
lineno -- the line in the file on which the def statement occurred
"""
import sys
import imp
import tokenize
from token import NAME, DEDENT, OP
from operator import itemgetter
__all__ = ["readmodule", "readmodule_ex", "Class", "Function"]
_modules = {} # cache of modules we've seen
# each Python class is represented by an instance of this class
class Class:
'''Class to represent a Python class.'''
def __init__(self, module, name, super, file, lineno):
self.module = module
self.name = name
if super is None:
super = []
self.super = super
self.methods = {}
self.file = file
self.lineno = lineno
def _addmethod(self, name, lineno):
self.methods[name] = lineno
class Function:
'''Class to represent a top-level Python function'''
def __init__(self, module, name, file, lineno):
self.module = module
self.name = name
self.file = file
self.lineno = lineno
def readmodule(module, path=None):
'''Backwards compatible interface.
Call readmodule_ex() and then only keep Class objects from the
resulting dictionary.'''
res = {}
for key, value in _readmodule(module, path or []).items():
if isinstance(value, Class):
res[key] = value
return res
def readmodule_ex(module, path=None):
'''Read a module file and return a dictionary of classes.
Search for MODULE in PATH and sys.path, read and parse the
module and return a dictionary with one entry for each class
found in the module.
'''
return _readmodule(module, path or [])
def _readmodule(module, path, inpackage=None):
'''Do the hard work for readmodule[_ex].
If INPACKAGE is given, it must be the dotted name of the package in
which we are searching for a submodule, and then PATH must be the
package search path; otherwise, we are searching for a top-level
module, and PATH is combined with sys.path.
'''
# Compute the full module name (prepending inpackage if set)
if inpackage is not None:
fullmodule = "%s.%s" % (inpackage, module)
else:
fullmodule = module
# Check in the cache
if fullmodule in _modules:
return _modules[fullmodule]
# Initialize the dict for this module's contents
dict = {}
# Check if it is a built-in module; we don't do much for these
if module in sys.builtin_module_names and inpackage is None:
_modules[module] = dict
return dict
# Check for a dotted module name
i = module.rfind('.')
if i >= 0:
package = module[:i]
submodule = module[i+1:]
parent = _readmodule(package, path, inpackage)
if inpackage is not None:
package = "%s.%s" % (inpackage, package)
if not '__path__' in parent:
raise ImportError('No package named {}'.format(package))
return _readmodule(submodule, parent['__path__'], package)
# Search the path for the module
f = None
if inpackage is not None:
f, fname, (_s, _m, ty) = imp.find_module(module, path)
else:
f, fname, (_s, _m, ty) = imp.find_module(module, path + sys.path)
if ty == imp.PKG_DIRECTORY:
dict['__path__'] = [fname]
path = [fname] + path
f, fname, (_s, _m, ty) = imp.find_module('__init__', [fname])
_modules[fullmodule] = dict
if ty != imp.PY_SOURCE:
# not Python source, can't do anything with this module
f.close()
return dict
stack = [] # stack of (class, indent) pairs
g = tokenize.generate_tokens(f.readline)
try:
for tokentype, token, start, _end, _line in g:
if tokentype == DEDENT:
lineno, thisindent = start
# close nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
elif token == 'def':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, meth_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
if stack:
cur_class = stack[-1][0]
if isinstance(cur_class, Class):
# it's a method
cur_class._addmethod(meth_name, lineno)
# else it's a nested def
else:
# it's a function
dict[meth_name] = Function(fullmodule, meth_name,
fname, lineno)
stack.append((None, thisindent)) # Marker for nested fns
elif token == 'class':
lineno, thisindent = start
# close previous nested classes and defs
while stack and stack[-1][1] >= thisindent:
del stack[-1]
tokentype, class_name, start = g.next()[0:3]
if tokentype != NAME:
continue # Syntax error
# parse what follows the class name
tokentype, token, start = g.next()[0:3]
inherit = None
if token == '(':
names = [] # List of superclasses
# there's a list of superclasses
level = 1
super = [] # Tokens making up current superclass
while True:
tokentype, token, start = g.next()[0:3]
if token in (')', ',') and level == 1:
n = "".join(super)
if n in dict:
# we know this super class
n = dict[n]
else:
c = n.split('.')
if len(c) > 1:
# super class is of the form
# module.class: look in module for
# class
m = c[-2]
c = c[-1]
if m in _modules:
d = _modules[m]
if c in d:
n = d[c]
names.append(n)
super = []
if token == '(':
level += 1
elif token == ')':
level -= 1
if level == 0:
break
elif token == ',' and level == 1:
pass
# only use NAME and OP (== dot) tokens for type name
elif tokentype in (NAME, OP) and level == 1:
super.append(token)
# expressions in the base list are not supported
inherit = names
cur_class = Class(fullmodule, class_name, inherit,
fname, lineno)
if not stack:
dict[class_name] = cur_class
stack.append((cur_class, thisindent))
elif token == 'import' and start[1] == 0:
modules = _getnamelist(g)
for mod, _mod2 in modules:
try:
# Recursively read the imported module
if inpackage is None:
_readmodule(mod, path)
else:
try:
_readmodule(mod, path, inpackage)
except ImportError:
_readmodule(mod, [])
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
pass
elif token == 'from' and start[1] == 0:
mod, token = _getname(g)
if not mod or token != "import":
continue
names = _getnamelist(g)
try:
# Recursively read the imported module
d = _readmodule(mod, path, inpackage)
except:
# If we can't find or parse the imported module,
# too bad -- don't die here.
continue
# add any classes that were defined in the imported module
# to our name space if they were mentioned in the list
for n, n2 in names:
if n in d:
dict[n2 or n] = d[n]
elif n == '*':
# don't add names that start with _
for n in d:
if n[0] != '_':
dict[n] = d[n]
except StopIteration:
pass
f.close()
return dict
def _getnamelist(g):
# Helper to get a comma-separated list of dotted names plus 'as'
# clauses. Return a list of pairs (name, name2) where name2 is
# the 'as' name, or None if there is no 'as' clause.
names = []
while True:
name, token = _getname(g)
if not name:
break
if token == 'as':
name2, token = _getname(g)
else:
name2 = None
names.append((name, name2))
while token != "," and "\n" not in token:
token = g.next()[1]
if token != ",":
break
return names
def _getname(g):
# Helper to get a dotted name, return a pair (name, token) where
# name is the dotted name, or None if there was no dotted name,
# and token is the next input token.
parts = []
tokentype, token = g.next()[0:2]
if tokentype != NAME and token != '*':
return (None, token)
parts.append(token)
while True:
tokentype, token = g.next()[0:2]
if token != '.':
break
tokentype, token = g.next()[0:2]
if tokentype != NAME:
break
parts.append(token)
return (".".join(parts), token)
def _main():
# Main program for testing.
import os
mod = sys.argv[1]
if os.path.exists(mod):
path = [os.path.dirname(mod)]
mod = os.path.basename(mod)
if mod.lower().endswith(".py"):
mod = mod[:-3]
else:
path = []
dict = readmodule_ex(mod, path)
objs = dict.values()
objs.sort(lambda a, b: cmp(getattr(a, 'lineno', 0),
getattr(b, 'lineno', 0)))
for obj in objs:
if isinstance(obj, Class):
print "class", obj.name, obj.super, obj.lineno
methods = sorted(obj.methods.iteritems(), key=itemgetter(1))
for name, lineno in methods:
if name != "__path__":
print " def", name, lineno
elif isinstance(obj, Function):
print "def", obj.name, obj.lineno
if __name__ == "__main__":
_main()
| gpl-2.0 |
federicoviola/alpha | wsgi/migrations/versions/42caf438dcdf_.py | 1 | 3965 | """empty message
Revision ID: 42caf438dcdf
Revises: None
Create Date: 2015-05-15 13:14:21.980616
"""
# revision identifiers, used by Alembic.
revision = '42caf438dcdf'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('location', sa.String(length=64), nullable=True),
sa.Column('about_me', sa.Text(), nullable=True),
sa.Column('member_since', sa.DateTime(), nullable=True),
sa.Column('university', sa.String(length=64), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('avatar_hash', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('follows',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('disabled', sa.Boolean(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_comments_timestamp'), 'comments', ['timestamp'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_comments_timestamp'), table_name='comments')
op.drop_table('comments')
op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
op.drop_table('posts')
op.drop_table('follows')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_roles_default'), table_name='roles')
op.drop_table('roles')
### end Alembic commands ###
| mit |
christianurich/DynaMind-ToolBox | DynaMind-Performance-Assessment/cd3/data/scripts/cd3.py | 5 | 1608 | import sys
sys.path.append("build/")
sys.path.append("data/scripts")
import pycd3 as cd
class PythonNodeFactory(cd.NodeFactory):
def __init__(self, node_klass):
cd.NodeFactory.__init__(self)
self.klass = node_klass
def createNode(self):
return self.klass()
def getNodeName(self):
return self.klass.__name__
def defineflow():
fd = cd.FlowDefinition()
fd['Q'] = cd.CalculationUnit.flow
fd['C0'] = cd.CalculationUnit.concentration
fd['C1'] = cd.CalculationUnit.concentration
cd.Flow.define(fd)
def register_python_nodes(nr, module):
__import__(module, None, None, [], 1)
for n in cd.Node.__subclasses__():
if nr.contains(n.__name__):
continue
cd.log("registering python node %s" % n.__name__)
nr.addNodeFactory(PythonNodeFactory(n))
def setup_test_model(model, nr, simulation):
src = nr.createNode("RandomCatchment")
src.init(0, 7200, 300)
src.setId('source')
snk = nr.createNode("FileOut")
snk.init(0, 7200, 300)
snk.setId('sink')
con = simulation.createConnection(src, 'out', snk, 'in')
model.addNode(src)
model.addNode(snk)
model.addConnection(con)
simulation.setModel(model)
sp = cd.SimulationParameters(0, 72000, 300)
simulation.setSimParams(sp)
def main():
cd.init()
defineflow()
nr = cd.NodeRegistry()
nr.addNativePlugin("nodes")
sr = cd.SimulationRegistry()
sr.addNativePlugin("nodes")
register_python_nodes(nr, 'testnodes')
sim = sr.createSimulation("OrderedPipeSimulation")
model = cd.Model()
setup_test_model(model, nr, sim)
sim.start(0)
if __name__ == "__main__":
main()
| gpl-2.0 |
patricksanders/muse | muse/apis/pyechonest/sandbox.py | 23 | 1849 | #!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2011-10-21.
The Sandbox module loosely covers http://developer.echonest.com/docs/v4/sandbox.html
Refer to the official api documentation if you are unsure about something.
"""
try:
import json
except ImportError:
import simplejson as json
import datetime
import util
from proxies import ResultList
def list(sandbox_name, results=15, start=0):
"""
Returns a list of all assets available in this sandbox
Args:
sandbox_name (str): A string representing the name of the sandbox
Kwargs:
results (int): An integer number of results to return
start (int): An integer starting value for the result set
Returns:
A list of asset dictionaries
Example:
>>> sandbox.list('bluenote')
[{}, {}]
>>>
"""
result = util.callm("%s/%s" % ('sandbox', 'list'), {'sandbox':sandbox_name, 'results': results, 'start': start})
assets = result['response']['assets']
start = result['response']['start']
total = result['response']['total']
return ResultList(assets, start, total)
def access(sandbox_name, asset_ids):
"""
Returns a list of assets with expiring access urls that can be used to download them
*Requires Oauth*
Args:
sandbox_name (str): A string representing the name of the sandbox
asset_ids (list): A list of asset_ids (str) to fetch
Kwargs:
Returns:
A list of asset dictionaries
Example:
>>> sandbox.access('bluenote', ['12345'])
[{}, {}]
>>>
"""
result = util.oauthgetm("%s/%s" % ('sandbox', 'access'), {'sandbox':sandbox_name, 'id':asset_ids})
return result['response']['assets']
| bsd-3-clause |
Rajeshkumar90/ansible-modules-extras | web_infrastructure/jboss.py | 48 | 5102 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Jeroen Hoekx <jeroen.hoekx@dsquare.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
module: jboss
version_added: "1.4"
short_description: deploy applications to JBoss
description:
- Deploy applications to JBoss standalone using the filesystem
options:
deployment:
required: true
description:
- The name of the deployment
src:
required: false
description:
- The remote path of the application ear or war to deploy
deploy_path:
required: false
default: /var/lib/jbossas/standalone/deployments
description:
- The location in the filesystem where the deployment scanner listens
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the application should be deployed or undeployed
notes:
- "The JBoss standalone deployment-scanner has to be enabled in standalone.xml"
- "Ensure no identically named application is deployed through the JBoss CLI"
author: "Jeroen Hoekx (@jhoekx)"
"""
EXAMPLES = """
# Deploy a hello world application
- jboss:
src: /tmp/hello-1.0-SNAPSHOT.war
deployment: hello.war
state: present
# Update the hello world application
- jboss:
src: /tmp/hello-1.1-SNAPSHOT.war
deployment: hello.war
state: present
# Undeploy the hello world application
- jboss:
deployment: hello.war
state: absent
"""
import os
import shutil
import time
def is_deployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.deployed"%(deployment)))
def is_undeployed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.undeployed"%(deployment)))
def is_failed(deploy_path, deployment):
return os.path.exists(os.path.join(deploy_path, "%s.failed"%(deployment)))
def main():
module = AnsibleModule(
argument_spec = dict(
src=dict(),
deployment=dict(required=True),
deploy_path=dict(default='/var/lib/jbossas/standalone/deployments'),
state=dict(choices=['absent', 'present'], default='present'),
),
)
changed = False
src = module.params['src']
deployment = module.params['deployment']
deploy_path = module.params['deploy_path']
state = module.params['state']
if state == 'present' and not src:
module.fail_json(msg="Argument 'src' required.")
if not os.path.exists(deploy_path):
module.fail_json(msg="deploy_path does not exist.")
deployed = is_deployed(deploy_path, deployment)
if state == 'present' and not deployed:
if not os.path.exists(src):
module.fail_json(msg='Source file %s does not exist.'%(src))
if is_failed(deploy_path, deployment):
### Clean up old failed deployment
os.remove(os.path.join(deploy_path, "%s.failed"%(deployment)))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.'%(deployment))
time.sleep(1)
changed = True
if state == 'present' and deployed:
if module.sha1(src) != module.sha1(os.path.join(deploy_path, deployment)):
os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
shutil.copyfile(src, os.path.join(deploy_path, deployment))
deployed = False
while not deployed:
deployed = is_deployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Deploying %s failed.'%(deployment))
time.sleep(1)
changed = True
if state == 'absent' and deployed:
os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment)))
while deployed:
deployed = not is_undeployed(deploy_path, deployment)
if is_failed(deploy_path, deployment):
module.fail_json(msg='Undeploying %s failed.'%(deployment))
time.sleep(1)
changed = True
module.exit_json(changed=changed)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
thewball/ThewCoin | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
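# getwork 'data' is 128 bytes (256 hex chars); the 4-byte nonce sits at
# byte offset 76 of the 80-byte header, i.e. hex chars 152..160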
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
Agana/MyBlogAgain | django/views/decorators/cache.py | 229 | 3639 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.decorators import decorator_from_middleware_with_args, available_attrs
from django.utils.cache import patch_cache_control, add_never_cache_headers
from django.middleware.cache import CacheMiddleware
def cache_page(*args, **kwargs):
"""
Decorator for views that tries getting the page from the cache and
populates the cache if the page isn't in the cache yet.
The cache is keyed by the URL and some data from the headers.
Additionally there is the key prefix that is used to distinguish different
cache areas in a multi-site setup. You could use the
sites.get_current().domain, for example, as that is unique across a Django
project.
Additionally, all headers from the response's Vary header will be taken
into account on caching -- just like the middleware does.
"""
# We need backwards compatibility with code which spells it this way:
# def my_view(): pass
# my_view = cache_page(my_view, 123)
# and this way:
# my_view = cache_page(123)(my_view)
# and this:
# my_view = cache_page(my_view, 123, key_prefix="foo")
# and this:
# my_view = cache_page(123, key_prefix="foo")(my_view)
# and possibly this way (?):
# my_view = cache_page(123, my_view)
# and also this way:
# my_view = cache_page(my_view)
# and also this way:
# my_view = cache_page()(my_view)
# We also add some asserts to give better error messages in case people are
# using other ways to call cache_page that no longer work.
cache_alias = kwargs.pop('cache', None)
key_prefix = kwargs.pop('key_prefix', None)
assert not kwargs, "The only keyword arguments are cache and key_prefix"
if len(args) > 1:
assert len(args) == 2, "cache_page accepts at most 2 arguments"
if callable(args[0]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[1], cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
elif callable(args[1]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)(args[1])
else:
assert False, "cache_page must be passed a view function if called with two arguments"
elif len(args) == 1:
if callable(args[0]):
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)(args[0])
else:
return decorator_from_middleware_with_args(CacheMiddleware)(cache_timeout=args[0], cache_alias=cache_alias, key_prefix=key_prefix)
else:
return decorator_from_middleware_with_args(CacheMiddleware)(cache_alias=cache_alias, key_prefix=key_prefix)
def cache_control(**kwargs):
def _cache_controller(viewfunc):
def _cache_controlled(request, *args, **kw):
response = viewfunc(request, *args, **kw)
patch_cache_control(response, **kwargs)
return response
return wraps(viewfunc, assigned=available_attrs(viewfunc))(_cache_controlled)
return _cache_controller
def never_cache(view_func):
"""
Decorator that adds headers to a response so that it will
never be cached.
"""
def _wrapped_view_func(request, *args, **kwargs):
response = view_func(request, *args, **kwargs)
add_never_cache_headers(response)
return response
return wraps(view_func, assigned=available_attrs(view_func))(_wrapped_view_func)
| bsd-3-clause |
woggle/mesos-old | frameworks/hadoop-0.20.2/src/contrib/hod/hodlib/ServiceProxy/serviceProxy.py | 182 | 2235 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""HOD Service Proxy Implementation"""
# -*- python -*-
import sys, time, signal, httplib, socket, threading
import sha, base64, hmac
import xml.dom.minidom
from hodlib.Common.socketServers import hodHTTPServer
from hodlib.Common.hodsvc import hodBaseService
from hodlib.Common.threads import loop
from hodlib.Common.tcp import tcpSocket
from hodlib.Common.util import get_exception_string
from hodlib.Common.AllocationManagerUtil import *
class svcpxy(hodBaseService):
def __init__(self, config):
hodBaseService.__init__(self, 'serviceProxy', config['service_proxy'],
xrtype='twisted')
self.amcfg=config['allocation_manager']
def _xr_method_isProjectUserValid(self, userid, project, ignoreErrors = False, timeOut = 15):
return self.isProjectUserValid(userid, project, ignoreErrors, timeOut)
def isProjectUserValid(self, userid, project, ignoreErrors, timeOut):
"""Method thats called upon by
the hodshell to verify if the
specified (user, project) combination
is valid"""
self.logs['main'].info("Begin isProjectUserValid()")
am = AllocationManagerUtil.getAllocationManager(self.amcfg['id'],
self.amcfg,
self.logs['main'])
self.logs['main'].info("End isProjectUserValid()")
return am.getQuote(userid, project)
| apache-2.0 |
mdaniel/intellij-community | plugins/hg4idea/testData/bin/mercurial/commandserver.py | 93 | 6720 | # commandserver.py - communicate with Mercurial's API over a pipe
#
# Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import struct
import sys, os
import dispatch, encoding, util
logfile = None
def log(*args):
if not logfile:
return
for a in args:
logfile.write(str(a))
logfile.flush()
class channeledoutput(object):
"""
Write data from in_ to out in the following format:
data length (unsigned int),
data
"""
def __init__(self, in_, out, channel):
self.in_ = in_
self.out = out
self.channel = channel
def write(self, data):
if not data:
return
self.out.write(struct.pack('>cI', self.channel, len(data)))
self.out.write(data)
self.out.flush()
def __getattr__(self, attr):
if attr in ('isatty', 'fileno'):
raise AttributeError(attr)
return getattr(self.in_, attr)
class channeledinput(object):
"""
Read data from in_.
Requests for input are written to out in the following format:
channel identifier - 'I' for plain input, 'L' line based (1 byte)
how many bytes to send at most (unsigned int),
The client replies with:
data length (unsigned int), 0 meaning EOF
data
"""
maxchunksize = 4 * 1024
def __init__(self, in_, out, channel):
self.in_ = in_
self.out = out
self.channel = channel
def read(self, size=-1):
if size < 0:
# if we need to consume all the clients input, ask for 4k chunks
# so the pipe doesn't fill up risking a deadlock
size = self.maxchunksize
s = self._read(size, self.channel)
buf = s
while s:
s = self._read(size, self.channel)
buf += s
return buf
else:
return self._read(size, self.channel)
def _read(self, size, channel):
if not size:
return ''
assert size > 0
# tell the client we need at most size bytes
self.out.write(struct.pack('>cI', channel, size))
self.out.flush()
length = self.in_.read(4)
length = struct.unpack('>I', length)[0]
if not length:
return ''
else:
return self.in_.read(length)
def readline(self, size=-1):
if size < 0:
size = self.maxchunksize
s = self._read(size, 'L')
buf = s
# keep asking for more until there's either no more or
# we got a full line
while s and s[-1] != '\n':
s = self._read(size, 'L')
buf += s
return buf
else:
return self._read(size, 'L')
def __iter__(self):
return self
def next(self):
l = self.readline()
if not l:
raise StopIteration
return l
def __getattr__(self, attr):
if attr in ('isatty', 'fileno'):
raise AttributeError(attr)
return getattr(self.in_, attr)
class server(object):
"""
Listens for commands on stdin, runs them and writes the output on a channel
based stream to stdout.
"""
def __init__(self, ui, repo, mode):
self.cwd = os.getcwd()
logpath = ui.config("cmdserver", "log", None)
if logpath:
global logfile
if logpath == '-':
# write log on a special 'd' (debug) channel
logfile = channeledoutput(sys.stdout, sys.stdout, 'd')
else:
logfile = open(logpath, 'a')
# the ui here is really the repo ui so take its baseui so we don't end
# up with its local configuration
self.ui = repo.baseui
self.repo = repo
self.repoui = repo.ui
if mode == 'pipe':
self.cerr = channeledoutput(sys.stderr, sys.stdout, 'e')
self.cout = channeledoutput(sys.stdout, sys.stdout, 'o')
self.cin = channeledinput(sys.stdin, sys.stdout, 'I')
self.cresult = channeledoutput(sys.stdout, sys.stdout, 'r')
self.client = sys.stdin
else:
raise util.Abort(_('unknown mode %s') % mode)
def _read(self, size):
if not size:
return ''
data = self.client.read(size)
# is the other end closed?
if not data:
raise EOFError
return data
def runcommand(self):
""" reads a list of \0 terminated arguments, executes
and writes the return code to the result channel """
length = struct.unpack('>I', self._read(4))[0]
if not length:
args = []
else:
args = self._read(length).split('\0')
# copy the uis so changes (e.g. --config or --verbose) don't
# persist between requests
copiedui = self.ui.copy()
self.repo.baseui = copiedui
self.repo.ui = self.repo.dirstate._ui = self.repoui.copy()
self.repo.invalidate()
self.repo.invalidatedirstate()
req = dispatch.request(args[:], copiedui, self.repo, self.cin,
self.cout, self.cerr)
ret = dispatch.dispatch(req) or 0 # might return None
# restore old cwd
if '--cwd' in args:
os.chdir(self.cwd)
self.cresult.write(struct.pack('>i', int(ret)))
def getencoding(self):
""" writes the current encoding to the result channel """
self.cresult.write(encoding.encoding)
def serveone(self):
cmd = self.client.readline()[:-1]
if cmd:
handler = self.capabilities.get(cmd)
if handler:
handler(self)
else:
# clients are expected to check what commands are supported by
# looking at the servers capabilities
raise util.Abort(_('unknown command %s') % cmd)
return cmd != ''
capabilities = {'runcommand' : runcommand,
'getencoding' : getencoding}
def serve(self):
hellomsg = 'capabilities: ' + ' '.join(sorted(self.capabilities))
hellomsg += '\n'
hellomsg += 'encoding: ' + encoding.encoding
# write the hello msg in -one- chunk
self.cout.write(hellomsg)
try:
while self.serveone():
pass
except EOFError:
# we'll get here if the client disconnected while we were reading
# its request
return 1
return 0
| apache-2.0 |
jakevdp/seaborn | doc/sphinxext/ipython_directive.py | 37 | 37557 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
The string to represent the IPython prompt in the generated ReST. The
default is 'Out [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount:
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold: refactoring, cleanups, pure python addition.
"""
from __future__ import print_function
from __future__ import unicode_literals
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import os
import re
import sys
import tempfile
import ast
from pandas.compat import zip, range, map, lmap, u, cStringIO as StringIO
import warnings
# To keep compatibility with various python versions
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Third-party
import sphinx
from docutils.parsers.rst import directives
from docutils import nodes
from sphinx.util.compat import Directive
# Our own
from IPython import Config, InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
text_type = str
else:
from StringIO import StringIO
text_type = unicode
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# we're assuming at most one decorator -- may need to
# rethink
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class DecodingStringIO(StringIO, object):
def __init__(self,buf='',encodings=('utf8',), *args, **kwds):
super(DecodingStringIO, self).__init__(buf, *args, **kwds)
self.set_encodings(encodings)
def set_encodings(self, encodings):
self.encodings = encodings
def write(self,data):
if isinstance(data, text_type):
return super(DecodingStringIO, self).write(data)
else:
for enc in self.encodings:
try:
data = data.decode(enc)
return super(DecodingStringIO, self).write(data)
except :
pass
# default to brute utf8 if no encoding succeded
return super(DecodingStringIO, self).write(data.decode('utf8', 'replace'))
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None,state=None):
self.cout = DecodingStringIO(u'')
if exec_lines is None:
exec_lines = []
self.state = state
# Create config object for IPython
config = Config()
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
try:
source_raw = splitter.source_raw_reset()[1]
except AttributeError:
# recent ipython #4504
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
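# Example of the mapping performed above (directory values are assumed):
# with savefig_dir='/docs/build/savefig' and source_dir='/docs/source',
# process_image('@savefig myplot.png width=4in') returns
# ('myplot.png', '.. image:: ../build/savefig/myplot.png\n   :width: 4in').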
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
# set the encodings to be used by DecodingStringIO
# to convert the execution output into unicode if
# needed. this attrib is set by IpythonDirective.run()
# based on the specified block options, defaulting to ['utf8']
self.cout.set_encodings(self.output_encoding)
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:' % ''.join(['.'] * (len(str(lineno)) + 2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# the "rest" is the standard output of the
# input, which needs to be added in
# verbatim mode
ret.append(rest)
self.cout.seek(0)
output = self.cout.read()
if not is_suppress and not is_semicolon:
ret.append(output)
elif is_semicolon: # get spacing right
ret.append('')
# context information
filename = self.state.document.current_source
lineno = self.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write('-' * 76 + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, output, is_doctest, decorator, image_file,
image_directive)
def process_output(self, data, output_prompt,
input_lines, output, is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
TAB = ' ' * 4
if is_doctest and output is not None:
found = output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = ('plt.gcf().savefig("%s", bbox_inches="tight", '
'dpi=100)' % image_file)
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
(out_data, input_lines, output, is_doctest, decorator,
image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
out_data = \
self.process_output(data, output_prompt,
input_lines, output, is_doctest,
decorator, image_file)
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings; it is unedited directive content.
This runs it line by line in the InteractiveShell, prepends
prompts as needed, captures stderr and stdout, and returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:' % ''.join(['.'] * (len(str(ct)) + 2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
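# Illustrative example of the transformation above (assuming the initial
# execution count of 0 that ct starts from):
#
#   process_pure_python(['x = 1', 'print(x)'])
#   -> ['In [0]: x = 1', '', 'In [1]: print(x)', '']
#
# i.e. each complete statement gains an input prompt and a trailing blank
# line, so the result can be re-parsed as an ipython block.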
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag,
'output_encoding': directives.unchanged_required
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
confdir = self.state.document.settings.env.app.confdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path
if isinstance(savefig_dir, list):
savefig_dir = savefig_dir[0] # safe to assume only one path?
savefig_dir = os.path.join(confdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines, self.state)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if self.state.document.current_source not in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
self.shell.output_encoding = [options.get('output_encoding', 'utf8')]
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' %s'%line for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines)>2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile(r'In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile(r'Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
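# Illustrative conf.py fragment exercising the values registered above
# (the extension path is an assumption; adjust it to wherever this
# module is installed):
#
#   extensions = ['IPython.sphinxext.ipython_directive']
#   ipython_mplbackend = 'agg'
#   ipython_execlines = ['import numpy as np',
#                        'import matplotlib.pyplot as plt']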
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
.....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__ == '__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| bsd-3-clause |
MissCatLady/AlarmEZ | venv/lib/python2.7/site-packages/setuptools/command/build_py.py | 147 | 10457 | import os.path, sys, fnmatch
from distutils.command.build_py import build_py as _build_py
from distutils.util import convert_path
from glob import glob
try:
from distutils.util import Mixin2to3 as _Mixin2to3
# add support for converting doctests that is missing in 3.1 distutils
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests = False):
# See if the distribution option has been set, otherwise check the
# setuptools default.
if self.distribution.use_2to3 is not True:
return
if not files:
return
log.info("Fixing "+" ".join(files))
self.__build_fixer_names()
self.__exclude_fixers()
if doctests:
if setuptools.run_2to3_on_doctests:
r = DistutilsRefactoringTool(self.fixer_names)
r.refactor(files, write=True, doctests_only=True)
else:
_Mixin2to3.run_2to3(self, files)
def __build_fixer_names(self):
if self.fixer_names: return
self.fixer_names = []
for p in setuptools.lib2to3_fixer_packages:
self.fixer_names.extend(get_fixers_from_package(p))
if self.distribution.use_2to3_fixers is not None:
for p in self.distribution.use_2to3_fixers:
self.fixer_names.extend(get_fixers_from_package(p))
def __exclude_fixers(self):
excluded_fixers = getattr(self, 'exclude_fixers', [])
if self.distribution.use_2to3_exclude_fixers is not None:
excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
for fixer_name in excluded_fixers:
if fixer_name in self.fixer_names:
self.fixer_names.remove(fixer_name)
except ImportError:
class Mixin2to3:
def run_2to3(self, files, doctests=True):
# Nothing done in 2.x
pass
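# Illustrative setup.py fragment driving the distribution options consumed
# above (a sketch only; the option names match the attributes checked in
# Mixin2to3 and build_py):
#
#   setup(
#       ...,
#       use_2to3=True,
#       use_2to3_fixers=['my_project.fixers'],
#       use_2to3_exclude_fixers=['lib2to3.fixes.fix_next'],
#       convert_2to3_doctests=['docs/usage.txt'],
#   )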
class build_py(_build_py, Mixin2to3):
"""Enhanced 'build_py' command that includes data files with packages
The data files are specified via a 'package_data' argument to 'setup()'.
See 'setuptools.dist.Distribution' for more details.
Also, this version of the 'build_py' command allows you to specify both
'py_modules' and 'packages' in the same setup operation.
"""
def finalize_options(self):
_build_py.finalize_options(self)
self.package_data = self.distribution.package_data
self.exclude_package_data = self.distribution.exclude_package_data or {}
if 'data_files' in self.__dict__: del self.__dict__['data_files']
self.__updated_files = []
self.__doctests_2to3 = []
def run(self):
"""Build modules, packages, and copy data files to build directory"""
if not self.py_modules and not self.packages:
return
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.run_2to3(self.__updated_files, False)
self.run_2to3(self.__updated_files, True)
self.run_2to3(self.__doctests_2to3, True)
# Only compile actual .py files, using our base class' idea of what our
# output files are.
self.byte_compile(_build_py.get_outputs(self, include_bytecode=0))
def __getattr__(self,attr):
if attr=='data_files': # lazily compute data files
self.data_files = files = self._get_data_files()
return files
return _build_py.__getattr__(self,attr)
def build_module(self, module, module_file, package):
outfile, copied = _build_py.build_module(self, module, module_file, package)
if copied:
self.__updated_files.append(outfile)
return outfile, copied
def _get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
self.analyze_manifest()
data = []
for package in self.packages or ():
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Length of path to strip from found files
plen = len(src_dir)+1
# Strip directory from globbed filenames
filenames = [
file[plen:] for file in self.find_data_files(package, src_dir)
]
data.append( (package, src_dir, build_dir, filenames) )
return data
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
globs = (self.package_data.get('', [])
+ self.package_data.get(package, []))
files = self.manifest_files.get(package, [])[:]
for pattern in globs:
# Each pattern has to be converted to a platform-specific path
files.extend(glob(os.path.join(src_dir, convert_path(pattern))))
return self.exclude_data_files(package, src_dir, files)
def build_package_data(self):
"""Copy data files into build directory"""
lastdir = None
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
srcfile = os.path.join(src_dir, filename)
outf, copied = self.copy_file(srcfile, target)
srcfile = os.path.abspath(srcfile)
if copied and srcfile in self.distribution.convert_2to3_doctests:
self.__doctests_2to3.append(outf)
def analyze_manifest(self):
self.manifest_files = mf = {}
if not self.distribution.include_package_data:
return
src_dirs = {}
for package in self.packages or ():
# Locate package source directory
src_dirs[assert_relative(self.get_package_dir(package))] = package
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
for path in ei_cmd.filelist.files:
d,f = os.path.split(assert_relative(path))
prev = None
oldf = f
while d and d!=prev and d not in src_dirs:
prev = d
d, df = os.path.split(d)
f = os.path.join(df, f)
if d in src_dirs:
if path.endswith('.py') and f==oldf:
continue # it's a module, not data
mf.setdefault(src_dirs[d],[]).append(path)
def get_data_files(self): pass # kludge 2.4 for lazy computation
if sys.version<"2.4": # Python 2.4 already has this code
def get_outputs(self, include_bytecode=1):
"""Return complete list of files copied to the build directory
This includes both '.py' files and data files, as well as '.pyc'
and '.pyo' files if 'include_bytecode' is true. (This method is
needed for the 'install_lib' command to do its job properly, and to
generate a correct installation manifest.)
"""
return _build_py.get_outputs(self, include_bytecode) + [
os.path.join(build_dir, filename)
for package, src_dir, build_dir,filenames in self.data_files
for filename in filenames
]
def check_package(self, package, package_dir):
"""Check namespace packages' __init__ for declare_namespace"""
try:
return self.packages_checked[package]
except KeyError:
pass
init_py = _build_py.check_package(self, package, package_dir)
self.packages_checked[package] = init_py
if not init_py or not self.distribution.namespace_packages:
return init_py
for pkg in self.distribution.namespace_packages:
if pkg==package or pkg.startswith(package+'.'):
break
else:
return init_py
f = open(init_py,'rbU')
if 'declare_namespace'.encode() not in f.read():
from distutils import log
log.warn(
"WARNING: %s is a namespace package, but its __init__.py does\n"
"not declare_namespace(); setuptools 0.7 will REQUIRE this!\n"
'(See the setuptools manual under "Namespace Packages" for '
"details.)\n", package
)
f.close()
return init_py
def initialize_options(self):
self.packages_checked={}
_build_py.initialize_options(self)
def get_package_dir(self, package):
res = _build_py.get_package_dir(self, package)
if self.distribution.src_root is not None:
return os.path.join(self.distribution.src_root, res)
return res
def exclude_data_files(self, package, src_dir, files):
"""Filter filenames for package's data files in 'src_dir'"""
globs = (self.exclude_package_data.get('', [])
+ self.exclude_package_data.get(package, []))
bad = []
for pattern in globs:
bad.extend(
fnmatch.filter(
files, os.path.join(src_dir, convert_path(pattern))
)
)
bad = dict.fromkeys(bad)
seen = {}
return [
f for f in files if f not in bad
and f not in seen and seen.setdefault(f,1) # ditch dupes
]
def assert_relative(path):
if not os.path.isabs(path):
return path
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError(
"""Error: setup script specifies an absolute path:
%s
setup() arguments must *always* be /-separated paths relative to the
setup.py directory, *never* absolute paths.
""" % path
)
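# Example (illustrative): assert_relative('pkg/data/a.txt') returns the
# path unchanged, while assert_relative('/abs/pkg/a.txt') raises
# DistutilsSetupError with the message above.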
| mit |
maartenq/ansible | test/runner/lib/cloud/cs.py | 4 | 8841 | """CloudStack plugin for integration tests."""
from __future__ import absolute_import, print_function
import json
import os
import re
import time
from lib.cloud import (
CloudProvider,
CloudEnvironment,
)
from lib.util import (
find_executable,
ApplicationError,
display,
SubprocessError,
is_shippable,
)
from lib.http import (
HttpClient,
HttpError,
urlparse,
)
from lib.docker_util import (
docker_run,
docker_rm,
docker_inspect,
docker_pull,
docker_network_inspect,
get_docker_container_id,
)
try:
# noinspection PyPep8Naming
import ConfigParser as configparser
except ImportError:
# noinspection PyUnresolvedReferences
import configparser
class CsCloudProvider(CloudProvider):
"""CloudStack cloud provider plugin. Sets up cloud resources before delegation."""
DOCKER_SIMULATOR_NAME = 'cloudstack-sim'
def __init__(self, args):
"""
:type args: TestConfig
"""
super(CsCloudProvider, self).__init__(args, config_extension='.ini')
# The simulator must be pinned to a specific version to guarantee CI passes with the version used.
self.image = 'quay.io/ansible/cloudstack-test-container:1.2.0'
self.container_name = ''
self.endpoint = ''
self.host = ''
self.port = 0
def filter(self, targets, exclude):
"""Filter out the cloud tests when the necessary config and resources are not available.
:type targets: tuple[TestTarget]
:type exclude: list[str]
"""
if os.path.isfile(self.config_static_path):
return
docker = find_executable('docker', required=False)
if docker:
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
display.warning('Excluding tests marked "%s" which require the "docker" command or config (see "%s"): %s'
% (skip.rstrip('/'), self.config_template_path, ', '.join(skipped)))
def setup(self):
"""Setup the cloud resource before delegation and register a cleanup callback."""
super(CsCloudProvider, self).setup()
if self._use_static_config():
self._setup_static()
else:
self._setup_dynamic()
def get_remote_ssh_options(self):
"""Get any additional options needed when delegating tests to a remote instance via SSH.
:rtype: list[str]
"""
if self.managed:
return ['-R', '8888:localhost:8888']
return []
def get_docker_run_options(self):
"""Get any additional options needed when delegating tests to a docker container.
:rtype: list[str]
"""
if self.managed:
return ['--link', self.DOCKER_SIMULATOR_NAME]
return []
def cleanup(self):
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.container_name:
if is_shippable():
docker_rm(self.args, self.container_name)
elif not self.args.explain:
display.notice('Remember to run `docker rm -f %s` when finished testing.' % self.container_name)
super(CsCloudProvider, self).cleanup()
def _setup_static(self):
"""Configure CloudStack tests for use with static configuration."""
parser = configparser.RawConfigParser()
parser.read(self.config_static_path)
self.endpoint = parser.get('cloudstack', 'endpoint')
parts = urlparse(self.endpoint)
self.host = parts.hostname
if not self.host:
raise ApplicationError('Could not determine host from endpoint: %s' % self.endpoint)
if parts.port:
self.port = parts.port
elif parts.scheme == 'http':
self.port = 80
elif parts.scheme == 'https':
self.port = 443
else:
raise ApplicationError('Could not determine port from endpoint: %s' % self.endpoint)
display.info('Read cs host "%s" and port %d from config: %s' % (self.host, self.port, self.config_static_path), verbosity=1)
self._wait_for_service()
def _setup_dynamic(self):
"""Create a CloudStack simulator using docker."""
config = self._read_config_template()
self.container_name = self.DOCKER_SIMULATOR_NAME
results = docker_inspect(self.args, self.container_name)
if results and not results[0]['State']['Running']:
docker_rm(self.args, self.container_name)
results = []
if results:
display.info('Using the existing CloudStack simulator docker container.', verbosity=1)
else:
display.info('Starting a new CloudStack simulator docker container.', verbosity=1)
docker_pull(self.args, self.image)
docker_run(self.args, self.image, ['-d', '-p', '8888:8888', '--name', self.container_name])
if not self.args.explain:
display.notice('The CloudStack simulator will probably be ready in 2 - 4 minutes.')
container_id = get_docker_container_id()
if container_id:
display.info('Running in docker container: %s' % container_id, verbosity=1)
self.host = self._get_simulator_address()
display.info('Found CloudStack simulator container address: %s' % self.host, verbosity=1)
else:
self.host = 'localhost'
self.port = 8888
self.endpoint = 'http://%s:%d' % (self.host, self.port)
self._wait_for_service()
if self.args.explain:
values = dict(
HOST=self.host,
PORT=str(self.port),
)
else:
credentials = self._get_credentials()
if self.args.docker:
host = self.DOCKER_SIMULATOR_NAME
else:
host = self.host
values = dict(
HOST=host,
PORT=str(self.port),
KEY=credentials['apikey'],
SECRET=credentials['secretkey'],
)
config = self._populate_config_template(config, values)
self._write_config(config)
def _get_simulator_address(self):
networks = docker_network_inspect(self.args, 'bridge')
try:
bridge = [network for network in networks if network['Name'] == 'bridge'][0]
containers = bridge['Containers']
container = [containers[container] for container in containers if containers[container]['Name'] == self.DOCKER_SIMULATOR_NAME][0]
return re.sub(r'/[0-9]+$', '', container['IPv4Address'])
except Exception:
display.error('Failed to process the following docker network inspect output:\n%s' %
json.dumps(networks, indent=4, sort_keys=True))
raise
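# The 'bridge' network inspect output parsed above looks roughly like this
# (abridged; the container id and address are assumed sample values):
#
#   [{'Name': 'bridge',
#     'Containers': {'3f2a...': {'Name': 'cloudstack-sim',
#                                'IPv4Address': '172.17.0.2/16'}}}]
#
# so the method returns '172.17.0.2' after stripping the '/16' suffix.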
def _wait_for_service(self):
"""Wait for the CloudStack service endpoint to accept connections."""
if self.args.explain:
return
client = HttpClient(self.args, always=True)
endpoint = self.endpoint
for _ in range(1, 30):
display.info('Waiting for CloudStack service: %s' % endpoint, verbosity=1)
try:
client.get(endpoint)
return
except SubprocessError:
pass
time.sleep(10)
raise ApplicationError('Timeout waiting for CloudStack service.')
def _get_credentials(self):
"""Wait for the CloudStack simulator to return credentials.
:rtype: dict[str, str]
"""
client = HttpClient(self.args, always=True)
endpoint = '%s/admin.json' % self.endpoint
for _ in range(1, 30):
display.info('Waiting for CloudStack credentials: %s' % endpoint, verbosity=1)
response = client.get(endpoint)
if response.status_code == 200:
try:
return response.json()
except HttpError as ex:
display.error(ex)
time.sleep(10)
raise ApplicationError('Timeout waiting for CloudStack credentials.')
class CsCloudEnvironment(CloudEnvironment):
"""CloudStack cloud environment plugin. Updates integration test environment after delegation."""
def configure_environment(self, env, cmd):
"""
:type env: dict[str, str]
:type cmd: list[str]
"""
changes = dict(
CLOUDSTACK_CONFIG=self.config_path,
)
env.update(changes)
cmd.append('-e')
cmd.append('cs_resource_prefix=%s' % self.resource_prefix)
| gpl-3.0 |
jkyeung/XlsxWriter | xlsxwriter/test/xmlwriter/test_xmlwriter.py | 1 | 4972 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ...xmlwriter import XMLwriter
class TestXMLwriter(unittest.TestCase):
"""
Test the XML Writer class.
"""
def setUp(self):
self.fh = StringIO()
self.writer = XMLwriter()
self.writer._set_filehandle(self.fh)
def test_xml_declaration(self):
"""Test _xml_declaration()"""
self.writer._xml_declaration()
exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_start_tag(self):
"""Test _xml_start_tag() with no attributes"""
self.writer._xml_start_tag('foo')
exp = """<foo>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_start_tag_with_attributes(self):
"""Test _xml_start_tag() with attributes"""
self.writer._xml_start_tag('foo', [('span', '8'), ('baz', '7')])
exp = """<foo span="8" baz="7">"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_start_tag_with_attributes_to_escape(self):
"""Test _xml_start_tag() with attributes requiring escaping"""
self.writer._xml_start_tag('foo', [('span', '&<>"')])
exp = """<foo span="&<>"">"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_start_tag_unencoded(self):
"""Test _xml_start_tag_unencoded() with attributes"""
self.writer._xml_start_tag_unencoded('foo', [('span', '&<>"')])
exp = """<foo span="&<>"">"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_end_tag(self):
"""Test _xml_end_tag()"""
self.writer._xml_end_tag('foo')
exp = """</foo>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_empty_tag(self):
"""Test _xml_empty_tag()"""
self.writer._xml_empty_tag('foo')
exp = """<foo/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_empty_tag_with_attributes(self):
"""Test _xml_empty_tag() with attributes"""
self.writer._xml_empty_tag('foo', [('span', '8')])
exp = """<foo span="8"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_empty_tag_unencoded(self):
"""Test _xml_empty_tag_unencoded() with attributes"""
self.writer._xml_empty_tag_unencoded('foo', [('span', '&')])
exp = """<foo span="&"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_data_element(self):
"""Test _xml_data_element()"""
self.writer._xml_data_element('foo', 'bar')
exp = """<foo>bar</foo>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_data_element_with_attributes(self):
"""Test _xml_data_element() with attributes"""
self.writer._xml_data_element('foo', 'bar', [('span', '8')])
exp = """<foo span="8">bar</foo>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_data_element_with_escapes(self):
"""Test _xml_data_element() with data requiring escaping"""
self.writer._xml_data_element('foo', '&<>"', [('span', '8')])
exp = """<foo span="8">&<>"</foo>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_string_element(self):
"""Test _xml_string_element()"""
self.writer._xml_string_element(99, [('span', '8')])
exp = """<c span="8" t=\"s\"><v>99</v></c>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_si_element(self):
"""Test _xml_si_element()"""
self.writer._xml_si_element('foo', [('span', '8')])
exp = """<si><t span="8">foo</t></si>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_rich_si_element(self):
"""Test _xml_rich_si_element()"""
self.writer._xml_rich_si_element('foo')
exp = """<si>foo</si>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_number_element(self):
"""Test _xml_number_element()"""
self.writer._xml_number_element(99, [('span', '8')])
exp = """<c span="8"><v>99</v></c>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_xml_formula_element(self):
"""Test _xml_formula_element()"""
self.writer._xml_formula_element('1+2', 3, [('span', '8')])
exp = """<c span="8"><f>1+2</f><v>3</v></c>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| bsd-2-clause |
percyfal/bokeh | examples/plotting/file/css_classes.py | 11 | 1585 | from jinja2 import Template
from bokeh.embed import file_html
from bokeh.layouts import column
from bokeh.models import Div, Paragraph
from bokeh.resources import CDN
from bokeh.util.browser import view
template = Template("""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>{{ title if title else "Bokeh Plot" }}</title>
{{ bokeh_css }}
{{ bokeh_js }}
<style>
.custom {
border-radius: 0.5em;
padding: 1em;
}
.custom-1 {
border: 3px solid #2397D8;
}
.custom-2 {
border: 3px solid #14999A;
background-color: whitesmoke;
}
</style>
</head>
<body>
{{ plot_div|indent(8) }}
{{ plot_script|indent(8) }}
</body>
</html>
""")
p = Paragraph(text="The divs below were configured with additional css_classes:")
div1 = Div(text="""
<p> This Bokeh Div adds the style classes:<p>
<pre>
.custom {
border-radius: 0.5em;
padding: 1em;
}
.custom-1 {
border: 3px solid #2397D8;
}
</pre>
""")
div1.css_classes = ["custom", "custom-1"]
div2 = Div(text="""
<p> This Bokeh Div adds the style classes:<p>
<pre>
.custom {
border-radius: 0.5em;
padding: 1em;
}
.custom-2 {
border: 3px solid #14999A;
background-color: whitesmoke;
}
</pre>
""")
div2.css_classes = ["custom", "custom-2"]
html = file_html(column(p, div1, div2), template=template, resources=CDN)
output_file = 'css_classes.html'
with open(output_file, 'w') as f:
f.write(html)
view(output_file)
| bsd-3-clause |
PeterCP/Genera-Flask | migrations/versions/f90d3b386518_add_columns_to_event_model.py | 1 | 1289 | """Add columns to Event model
Columns added:
location_name
latitude
longitude
price
max_attendants
Revision ID: f90d3b386518
Revises: 1150136bf0ab
Create Date: 2016-08-08 11:30:45.154593
"""
# revision identifiers, used by Alembic.
revision = 'f90d3b386518'
down_revision = '1150136bf0ab'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('events', sa.Column('latitude', sa.Numeric(precision=10, scale=7), nullable=True))
op.add_column('events', sa.Column('longitude', sa.Numeric(precision=10, scale=7), nullable=True))
op.add_column('events', sa.Column('location_name', sa.String(length=255), nullable=True))
op.add_column('events', sa.Column('max_attendants', sa.Integer(), nullable=True))
op.add_column('events', sa.Column('price', sa.Numeric(precision=10, scale=3), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('events', 'price')
op.drop_column('events', 'max_attendants')
op.drop_column('events', 'location_name')
op.drop_column('events', 'longitude')
op.drop_column('events', 'latitude')
### end Alembic commands ###
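# Illustrative usage (assuming a configured alembic environment):
#
#   alembic upgrade f90d3b386518     # apply this revision
#   alembic downgrade 1150136bf0ab   # revert to the previous revision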
| gpl-3.0 |
devonjones/PSRD-Parser | src/psrd/extensions/table/weapon.py | 1 | 2927 | #import pprint
from BeautifulSoup import BeautifulSoup
from psrd.extensions.table.general import add_additional_fields, fix_cost, set_subtype, clear_nbsp
def set_damage(weapon):
damage = {
"see text": ('see text', 'see text'),
"special": ('special', 'special'),
"Special": ('Special', 'Special'),
"1d2": ('—', '1d3'),
"1d3": ('1', '1d4'),
"1d4": ('1d2', '1d6'),
"1d6": ('1d3', '1d8'),
"1d8": ('1d4', '2d6'),
"1d10": ('1d6', '2d8'),
"1d12": ('1d10', '3d6'),
"2d4": ('1d6', '2d6'),
"2d6": ('1d8', '3d6'),
"2d8": ('1d6', '3d8'),
"2d10": ('2d8', '4d8'),
"2d12": ('2d10', '6d6'),
"3d6": ('2d6', '4d6'),
"3d8": ('2d8', '4d8'),
"4d6": ('3d6', '6d6'),
"4d8": ('3d8', '6d8'),
"6d6": ('4d6', '8d6'),
"6d8": ('4d8', '8d8'),
"8d6": ('6d6', '12d6'),
"8d8": ('6d8', '12d8'),
}
if u"Dmg (M)" in weapon:
medium = weapon[u"Dmg (M)"]
parts = medium.split('/')
tiny = []
large = []
for part in parts:
dmg = damage[part]
tiny.append(dmg[0])
large.append(dmg[1])
weapon["Dmg (T)"] = '/'.join(tiny)
weapon["Dmg (L)"] = '/'.join(large)
def process_weapons(table_data, weapons):
remap = {"Cost": "price", "Weight": "weight", "Name": "name"}
for weapon in weapons:
item = {"type": "item"}
set_damage(weapon)
fix_cost(weapon)
fields = weapon.keys()
fields.sort()
for key in fields:
if key in remap.keys():
item[remap[key]] = weapon[key]
else:
misc = item.setdefault('misc', [])
subsection = "Weapon"
value = weapon[key]
if weapon['Name'] in table_data.get('distinct_section', {}):
subsection = table_data['distinct_section'][weapon['Name']]
misc.append({
"field": key,
"subsection": subsection,
"value": value})
weapon['item'] = item
set_subtype(table_data, "Weapon Class", weapon)
#pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(weapons)
return weapons
def parse_weapon_table(table, table_data):
soup = BeautifulSoup(table['body'])
fields = parse_weapon_header(soup.table.thead.tr)
category = None
weapon_class = None
weapons = []
for line in soup.table.contents:
if line.name == 'thead':
category = line.tr.contents[0].getText()
elif line.name == 'tr':
if len(line.contents) == 1:
weapon_class = line.getText()
else:
weapon = {'Proficiency':category, 'Weapon Class': weapon_class}
for i in range(len(line.contents)):
data = line.contents[i].getText()
if data != '—':
weapon[fields[i]] = clear_nbsp(
line.contents[i].getText())
elif fields[i] in ('Cost', 'Weight'):
weapon[fields[i]] = line.contents[i].getText()
add_additional_fields(table_data, weapon)
weapons.append(weapon)
return process_weapons(table_data, weapons)
def parse_weapon_header(line):
for sup in line.findAll('sup'):
sup.extract()
fields = [td.getText() for td in line.contents]
fields.pop(0)
fields.insert(0, 'Name')
return fields
| gpl-3.0 |
jonashaag/django-nonrel-nohistory | django/contrib/gis/tests/test_geoip.py | 290 | 4204 | import os, unittest
from django.db import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.utils import GeoIP, GeoIPException
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_PATH setting should be the only setting set (the directory
# should contain links to, or the actual database files, 'GeoIP.dat'
# and 'GeoLiteCity.dat').
class GeoIPTest(unittest.TestCase):
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertEqual(True, bool(g._country))
self.assertEqual(True, bool(g._city))
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertEqual(None, g4._country)
g5 = GeoIP(cntry, city='')
self.assertEqual(None, g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, basestring):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '130.80.29.3'
fqdn = 'chron.com'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertTrue(isinstance(geom, GEOSGeometry))
lon, lat = (-95.3670, 29.7523)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(GeoIPTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
| bsd-3-clause |
welex91/ansible-modules-core | web_infrastructure/django_manage.py | 36 | 11488 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Scott Anderson <scottanderson42@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: django_manage
short_description: Manages a Django application.
description:
- Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
required: true
app_path:
description:
- The path to the root of the Django application where B(manage.py) lives.
required: true
settings:
description:
- The Python path to the application's settings module, such as 'myapp.settings'.
required: false
pythonpath:
description:
- A directory to add to the Python path. Typically used to include the settings module if it is located external to the application directory.
required: false
virtualenv:
description:
- An optional path to a I(virtualenv) installation to use while running the manage application.
required: false
apps:
description:
- A list of space-delimited apps to target. Used by the 'test' command.
required: false
cache_table:
description:
- The name of the table used for database-backed caching. Used by the 'createcachetable' command.
required: false
database:
description:
- The database to target. Used by the 'createcachetable', 'flush', 'loaddata', and 'syncdb' commands.
required: false
failfast:
description:
- Fail the command immediately if a test fails. Used by the 'test' command.
required: false
default: "no"
choices: [ "yes", "no" ]
fixtures:
description:
- A space-delimited list of fixture file names to load in the database. B(Required) by the 'loaddata' command.
required: false
skip:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python", for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
EXAMPLES = """
# Run cleanup on the application installed in 'django_dir'.
- django_manage: command=cleanup app_path={{ django_dir }}
# Load the initial_data fixture into the application
- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }}
# Run syncdb on the application
- django_manage: >
command=syncdb
app_path={{ django_dir }}
settings={{ settings_app_name }}
pythonpath={{ settings_dir }}
virtualenv={{ virtualenv_dir }}
# Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest
# Create an initial superuser.
- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }}
"""
import os
def _fail(module, cmd, out, err, **kwargs):
msg = ''
if out:
msg += "stdout: %s" % (out, )
if err:
msg += "\n:stderr: %s" % (err, )
module.fail_json(cmd=cmd, msg=msg, **kwargs)
def _ensure_virtualenv(module):
venv_param = module.params['virtualenv']
if venv_param is None:
return
vbin = os.path.join(os.path.expanduser(venv_param), 'bin')
activate = os.path.join(vbin, 'activate')
if not os.path.exists(activate):
virtualenv = module.get_bin_path('virtualenv', True)
vcmd = [virtualenv, venv_param]
rc, out_venv, err_venv = module.run_command(vcmd)
if rc != 0:
_fail(module, vcmd, out_venv, err_venv)
os.environ["PATH"] = "%s:%s" % (vbin, os.environ["PATH"])
os.environ["VIRTUAL_ENV"] = venv_param
def createcachetable_filter_output(line):
return "Already exists" not in line
def flush_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def loaddata_filter_output(line):
return "Installed" in line and "Installed 0 object" not in line
def syncdb_filter_output(line):
return ("Creating table " in line) or ("Installed" in line and "Installed 0 object" not in line)
def migrate_filter_output(line):
return ("Migrating forwards " in line) or ("Installed" in line and "Installed 0 object" not in line) or ("Applying" in line)
def collectstatic_filter_output(line):
return "0 static files" not in line
def main():
command_allowed_param_map = dict(
cleanup=(),
createcachetable=('cache_table', 'database', ),
flush=('database', ),
loaddata=('database', 'fixtures', ),
syncdb=('database', ),
test=('failfast', 'testrunner', 'liveserver', 'apps', ),
validate=(),
migrate=('apps', 'skip', 'merge', 'database',),
collectstatic=('clear', 'link', ),
)
command_required_param_map = dict(
loaddata=('fixtures', ),
)
# forces --noinput on every command that needs it
noinput_commands = (
'flush',
'syncdb',
'migrate',
'test',
'collectstatic',
)
# These params are allowed for certain commands only
specific_params = ('apps', 'clear', 'database', 'failfast', 'fixtures', 'liveserver', 'testrunner')
# These params are automatically added to the command if present
general_params = ('settings', 'pythonpath', 'database',)
specific_boolean_params = ('clear', 'failfast', 'skip', 'merge', 'link')
end_of_command_params = ('apps', 'cache_table', 'fixtures')
module = AnsibleModule(
argument_spec=dict(
command = dict(default=None, required=True),
app_path = dict(default=None, required=True),
settings = dict(default=None, required=False),
pythonpath = dict(default=None, required=False, aliases=['python_path']),
virtualenv = dict(default=None, required=False, aliases=['virtual_env']),
apps = dict(default=None, required=False),
cache_table = dict(default=None, required=False),
clear = dict(default=None, required=False, type='bool'),
database = dict(default=None, required=False),
failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures = dict(default=None, required=False),
liveserver = dict(default=None, required=False, aliases=['live_server']),
testrunner = dict(default=None, required=False, aliases=['test_runner']),
skip = dict(default=None, required=False, type='bool'),
merge = dict(default=None, required=False, type='bool'),
link = dict(default=None, required=False, type='bool'),
),
)
command = module.params['command']
app_path = os.path.expanduser(module.params['app_path'])
virtualenv = module.params['virtualenv']
for param in specific_params:
value = module.params[param]
if param in specific_boolean_params:
value = module.boolean(value)
if value and param not in command_allowed_param_map[command]:
module.fail_json(msg='%s param is incompatible with command=%s' % (param, command))
for param in command_required_param_map.get(command, ()):
if not module.params[param]:
module.fail_json(msg='%s param is required for command=%s' % (param, command))
_ensure_virtualenv(module)
cmd = "./manage.py %s" % (command, )
if command in noinput_commands:
cmd = '%s --noinput' % cmd
for param in general_params:
if module.params[param]:
cmd = '%s --%s=%s' % (cmd, param, module.params[param])
for param in specific_boolean_params:
if module.boolean(module.params[param]):
cmd = '%s --%s' % (cmd, param)
# these params always get tacked on the end of the command
for param in end_of_command_params:
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
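    # Illustrative example (hypothetical values): command=migrate with
    # database='default' and apps='myapp' builds:
    #   ./manage.py migrate --noinput --database=default myapp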
    rc, out, err = module.run_command(cmd, cwd=app_path)
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
else:
if "Unknown command:" in err:
_fail(module, cmd, err, "Unknown django command: %s" % command)
_fail(module, cmd, out, err, path=os.environ["PATH"], syspath=sys.path)
changed = False
lines = out.split('\n')
filt = globals().get(command + "_filter_output", None)
if filt:
        filtered_output = filter(filt, lines)
if len(filtered_output):
changed = filtered_output
module.exit_json(changed=changed, out=out, cmd=cmd, app_path=app_path, virtualenv=virtualenv,
settings=module.params['settings'], pythonpath=module.params['pythonpath'])
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
opencloudinfra/orchestrator | venv/Lib/site-packages/django/conf/locale/de/formats.py | 504 | 1100 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| gpl-3.0 |
rienafairefr/pynYNAB | docs/conf.py | 2 | 4876 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
# -- Project information -----------------------------------------------------
project = 'pynYNAB'
copyright = '2018, rienafairefr'
author = 'rienafairefr'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinxarg.ext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pynYNABdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pynYNAB.tex', 'pynYNAB Documentation',
'rienafairefr', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pynynab', 'pynYNAB Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pynYNAB', 'pynYNAB Documentation',
author, 'pynYNAB', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration ------------------------------------------------- | mit |
aaxelb/SHARE | share/transformers/mods.py | 2 | 16357 | import re
import logging
import xmltodict
from share.transform.chain import ChainTransformer, ctx, links as tools
from share.transform.chain.exceptions import InvalidIRI
from share.transform.chain.links import GuessAgentTypeLink
from share.transform.chain.parsers import Parser
from share.transform.chain.utils import force_text
from share.transform.chain.utils import oai_allowed_by_sets
logger = logging.getLogger(__name__)
def get_list(dct, key):
val = dct.get(key, [])
return val if isinstance(val, list) else [val]
#### Identifiers ####
class MODSWorkIdentifier(Parser):
schema = 'WorkIdentifier'
uri = tools.RunPython(force_text, ctx)
class Extra:
identifier_type = tools.Try(ctx['@type'])
class MODSAgentIdentifier(Parser):
schema = 'AgentIdentifier'
uri = ctx
#### Agents ####
class AffiliatedAgent(Parser):
schema = tools.GuessAgentType(ctx, default='organization')
name = ctx
class IsAffiliatedWith(Parser):
related = tools.Delegate(AffiliatedAgent, ctx)
class MODSAgent(Parser):
schema = tools.RunPython('get_agent_schema', ctx)
name = tools.OneOf(
tools.RunPython(force_text, ctx['mods:displayForm']),
tools.RunPython('squash_name_parts', ctx)
)
related_agents = tools.Map(tools.Delegate(IsAffiliatedWith), tools.Concat(tools.Try(
tools.Filter(lambda x: bool(x), tools.RunPython(force_text, ctx['mods:affiliation']))
)))
identifiers = tools.Map(
tools.Delegate(MODSAgentIdentifier),
tools.Unique(tools.Map(
tools.Try(tools.IRI(), exceptions=(InvalidIRI, )),
tools.Map(
tools.RunPython(force_text),
tools.Filter(
lambda obj: 'invalid' not in obj,
tools.Try(ctx['mods:nameIdentifier']),
)
)
))
)
class Extra:
name_type = tools.Try(ctx['@type'])
name_part = tools.Try(ctx['mods:namePart'])
affiliation = tools.Try(ctx['mods:affiliation'])
description = tools.Try(ctx['mods:description'])
display_form = tools.Try(ctx['mods:displayForm'])
etal = tools.Try(ctx['mods:etal'])
name_identifier = tools.Try(ctx['mods:nameIdentifier'])
def squash_name_parts(self, name):
name_parts = get_list(name, 'mods:namePart')
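        # e.g. (illustrative) {'mods:namePart': ['Jane', 'Q.', 'Doe']} -> 'Jane Q. Doe'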
return ' '.join([force_text(n) for n in name_parts])
def get_agent_schema(self, obj):
name_type = obj.get('@type')
if name_type == 'personal':
return 'person'
if name_type == 'conference':
return 'organization'
# TODO SHARE-718
# if name_type == 'family':
# return 'family'
if name_type == 'corporate':
return GuessAgentTypeLink(default='organization').execute(self.squash_name_parts(obj))
return GuessAgentTypeLink().execute(self.squash_name_parts(obj))
class MODSPersonSplitName(MODSAgent):
schema = 'person'
name = None
family_name = tools.RunPython('get_name_part', ctx, 'family')
given_name = tools.RunPython('get_name_part', ctx, 'given')
suffix = tools.RunPython('get_name_part', ctx, 'termsOfAddress')
def get_name_part(self, obj, type):
name_parts = get_list(obj, 'mods:namePart')
return ' '.join([force_text(n) for n in name_parts if n.get('@type') == type])
class MODSSimpleAgent(Parser):
schema = tools.GuessAgentType(ctx, default='organization')
name = ctx
class MODSSimplePublisher(Parser):
schema = 'Publisher'
agent = tools.Delegate(MODSSimpleAgent, ctx)
#### Tags/Subjects ####
class MODSSubject(Parser):
schema = 'Subject'
name = ctx
class MODSThroughSubjects(Parser):
schema = 'ThroughSubjects'
subject = tools.Delegate(MODSSubject, ctx)
class MODSTag(Parser):
schema = 'Tag'
name = ctx
class MODSThroughTags(Parser):
schema = 'ThroughTags'
tag = tools.Delegate(MODSTag, ctx)
#### Work Relations ####
RELATION_MAP = {
# 'preceding':
# 'succeeding':
'original': 'IsDerivedFrom',
'host': 'IsPartOf',
'constituent': 'IsPartOf',
'series': 'IsPartOf',
# 'otherVersion':
# 'otherFormat':
'isReferencedBy': 'References',
'references': 'References',
'reviewOf': 'Reviews',
}
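# Any relatedItem @type not listed above falls back to the generic
# 'WorkRelation' schema (see map_relation_type below).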
REVERSE_RELATIONS = {
'isReferencedBy',
'constituent',
}
# Finds the generated subclass of MODSCreativeWork
def related_work_parser(_):
return type(next(p for p in ctx.parsers if isinstance(p, MODSCreativeWork)))
def map_relation_type(obj):
return RELATION_MAP.get(obj['@type'], 'WorkRelation')
class MODSReverseWorkRelation(Parser):
schema = tools.RunPython(map_relation_type)
subject = tools.Delegate(related_work_parser, ctx)
class MODSWorkRelation(Parser):
schema = tools.RunPython(map_relation_type)
related = tools.Delegate(related_work_parser, ctx)
def work_relation_parser(obj):
if obj['@type'] in REVERSE_RELATIONS:
return MODSReverseWorkRelation
return MODSWorkRelation
#### Agent-work relations ####
def agent_parser(name):
name_parts = get_list(name, 'mods:namePart')
split_name = any(isinstance(n, dict) and n.get('@type') in {'given', 'family'} for n in name_parts)
return MODSPersonSplitName if split_name else MODSAgent
class MODSAgentWorkRelation(Parser):
schema = 'AgentWorkRelation'
agent = tools.Delegate(agent_parser, ctx)
cited_as = tools.RunPython(force_text, tools.Try(ctx['mods:displayForm']))
class MODSHost(MODSAgentWorkRelation):
schema = 'Host'
class MODSFunder(MODSAgentWorkRelation):
schema = 'Funder'
class MODSContributor(MODSAgentWorkRelation):
schema = 'Contributor'
class MODSCreator(MODSContributor):
schema = 'Creator'
order_cited = ctx('index')
class MODSPublisher(MODSAgentWorkRelation):
schema = 'Publisher'
#### Works ####
class MODSCreativeWork(Parser):
default_type = 'CreativeWork'
type_map = None
role_map = None
schema = tools.RunPython(
'get_schema',
tools.OneOf(
tools.RunPython(force_text, ctx['mods:genre']),
tools.Static(None)
)
)
title = tools.RunPython('join_title_info', ctx)
# Abstracts have the optional attribute "shareable". Don't bother checking for it, because
# abstracts that are not shareable should not have been shared with SHARE.
description = tools.Join(tools.RunPython(force_text, tools.Try(ctx['mods:abstract']), '\n'))
identifiers = tools.Map(
tools.Delegate(MODSWorkIdentifier),
tools.Filter(
lambda obj: 'invalid' not in obj,
tools.Concat(
tools.Try(ctx['mods:identifier']),
tools.Try(ctx.header['identifier']),
tools.Try(ctx['mods:location']['mods:url']),
)
)
)
related_works = tools.Concat(
tools.Map(
tools.Delegate(work_relation_parser),
tools.Try(ctx['mods:relatedItem'])
)
)
related_agents = tools.Concat(
tools.Map(
tools.Delegate(MODSCreator),
tools.RunPython('filter_names', ctx, 'creator')
),
tools.Map(
tools.Delegate(MODSFunder),
tools.RunPython('filter_names', ctx, 'funder')
),
tools.Map(
tools.Delegate(MODSHost),
tools.RunPython('filter_names', ctx, 'host')
),
tools.Map(
tools.Delegate(MODSPublisher),
tools.RunPython('filter_names', ctx, 'publisher')
),
tools.Map(
tools.Delegate(MODSContributor),
tools.RunPython('filter_names', ctx, 'creator', 'funder', 'host', 'publisher', invert=True)
),
tools.Map(
tools.Delegate(MODSSimplePublisher),
tools.Try(ctx['mods:originInfo']['mods:publisher']),
),
)
rights = tools.RunPython(force_text, tools.Try(ctx['mods:accessCondition']), '\n')
language = tools.ParseLanguage(
tools.Try(ctx['mods:language']['mods:languageTerm']),
)
subjects = tools.Map(
tools.Delegate(MODSThroughSubjects),
tools.Subjects(
tools.Concat(
tools.Try(ctx['mods:subject']['mods:topic']),
)
)
)
tags = tools.Map(
tools.Delegate(MODSThroughTags),
tools.Concat(
tools.Map(
tools.RunPython('tokenize'),
tools.Map(
tools.RunPython(force_text),
tools.Try(ctx.header.setSpec),
tools.Try(ctx['mods:genre']),
tools.Try(ctx['mods:classification']),
tools.Try(ctx['mods:subject']['mods:topic']),
)
),
deep=True
)
)
date_updated = tools.ParseDate(tools.Try(ctx.header.datestamp))
# TODO (in regulator) handle date ranges, uncertain dates ('1904-1941', '1890?', '1980-', '19uu', etc.)
date_published = tools.OneOf(
tools.ParseDate(tools.RunPython(force_text, tools.Try(ctx['mods:originInfo']['mods:dateIssued']))),
tools.Static(None)
)
is_deleted = tools.RunPython(lambda status: status == 'deleted', tools.Try(ctx.record.header['@status']))
class Extra:
"""
Fields that are combined in the base parser are relisted as singular elements that match
their original entry to preserve raw data structure.
"""
# (dc:description) http://www.loc.gov/standards/mods/userguide/abstract.html
abstract = tools.Try(ctx['mods:abstract'])
# (dc:rights) http://www.loc.gov/standards/mods/userguide/accesscondition.html
accessConditions = tools.Try(ctx['mods:accessCondition'])
# (dc:subject) http://www.loc.gov/standards/mods/userguide/classification.html
classification = tools.Try(ctx['mods:classification'])
# (N/A) http://www.loc.gov/standards/mods/userguide/extension.html
extension = tools.Try(ctx['mods:extension'])
# SHARE type
# (dc:type) http://www.loc.gov/standards/mods/userguide/genre.html
genre = tools.Try(ctx['mods:genre'])
# (dc:identifier) http://www.loc.gov/standards/mods/userguide/identifier.html
identifier = tools.Try(ctx['mods:identifier'])
# (dc:language) http://www.loc.gov/standards/mods/userguide/language.html
language = tools.Try(ctx['mods:language'])
# (dc:identifier for url) http://www.loc.gov/standards/mods/userguide/location.html
location = tools.Try(ctx['mods:location'])
# (dc:creator|dc:contributor) http://www.loc.gov/standards/mods/userguide/name.html
name = tools.Try(ctx['mods:name'])
# (dc:description) http://www.loc.gov/standards/mods/userguide/note.html
note = tools.Try(ctx['mods:note'])
# (dc:publisher|dc:date) http://www.loc.gov/standards/mods/userguide/origininfo.html
originInfo = tools.Try(ctx['mods:originInfo'])
# Extra
# (dc:title) http://www.loc.gov/standards/mods/userguide/part.html
part = tools.Try(ctx['mods:part'])
# (dc:format or N/A) http://www.loc.gov/standards/mods/userguide/physicaldescription.html
physicalDescription = tools.Try(ctx['mods:physicalDescription'])
# Metadata information
# (N/A) http://www.loc.gov/standards/mods/userguide/recordinfo.html
recordInfo = tools.Try(ctx['mods:recordInfo'])
# (dc:relation) http://www.loc.gov/standards/mods/userguide/relateditem.html
relatedItem = tools.Try(ctx['mods:relatedItem'])
# (dc:subject|dc:type|dc:coverage|N/A) http://www.loc.gov/standards/mods/userguide/subject.html
subject = tools.Try(ctx['mods:subject'])
# (dc:description) http://www.loc.gov/standards/mods/userguide/tableofcontents.html
tableOfContents = tools.Try(ctx['mods:tableOfContents'])
# (N/A) http://www.loc.gov/standards/mods/userguide/targetaudience.html
targetAudience = tools.Try(ctx['mods:targetAudience'])
# (dc:title) http://www.loc.gov/standards/mods/userguide/titleinfo.html
titleInfo = tools.Try(ctx['mods:titleInfo'])
# Extra
# (dc:type) http://www.loc.gov/standards/mods/userguide/typeofresource.html
typeOfResource = tools.Try(ctx['mods:typeOfResource'])
def get_schema(self, types):
if not types or not self.type_map:
return self.default_type
if isinstance(types, str):
types = [types]
for t in types:
if isinstance(t, dict):
t = t['#text']
t = t.lower()
if t in self.type_map:
return self.type_map[t]
return self.default_type
def tokenize(self, data):
if isinstance(data, str):
data = [data]
tokens = []
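        # e.g. (illustrative) 'History - Europe. Maps, Atlases'
        #   -> ['History', 'Europe', 'Maps', 'Atlases']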
for item in data:
tokens.extend([x.strip() for x in re.split(r'(?: - )|\.|,', item) if x])
return tokens
# Map titleInfos to a string: https://www.loc.gov/standards/mods/userguide/titleinfo.html#mappings
def join_title_info(self, obj):
def get_part(title_info, part_name, delimiter=''):
part = force_text(title_info.get(part_name, ''), ' ').strip()
return delimiter + part if part else ''
title_infos = get_list(obj, 'mods:titleInfo')
titles = []
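        # e.g. (illustrative) a titleInfo of {'mods:title': 'Annual Report',
        # 'mods:partNumber': '2'} contributes 'Annual Report. 2'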
for title_info in title_infos:
title = ''
title += get_part(title_info, 'mods:nonSort')
title += get_part(title_info, 'mods:title')
title += get_part(title_info, 'mods:subTitle', ': ')
title += get_part(title_info, 'mods:partNumber', '. ')
title += get_part(title_info, 'mods:partName', ': ')
if title:
titles.append(title)
return '. '.join(titles)
def filter_names(self, obj, *roles, invert=False):
names = get_list(obj, 'mods:name')
filtered = [*names] if invert else []
for name in names:
name_roles = get_list(name, 'mods:role')
for role in name_roles:
role_terms = get_list(role, 'mods:roleTerm')
name_roles = {force_text(r).lower() for r in role_terms}
name_roles.update({self.role_map[r] for r in name_roles if r in self.role_map})
if name_roles.intersection(roles):
if invert:
filtered.remove(name)
else:
filtered.append(name)
return filtered
class MODSTransformer(ChainTransformer):
"""Transformer for oai_dc metadata format.
transformer_kwargs (TODO explain):
emitted_type
approved_sets
blocked_sets
type_map
role_map
"""
VERSION = 1
marc_roles = {
'fnd': 'funder',
'hst': 'host',
'his': 'host',
'pbl': 'publisher',
'cre': 'creator',
'aut': 'creator',
'author': 'creator',
}
def get_root_parser(self, unwrapped, emitted_type='creativework', type_map=None, role_map=None, **kwargs):
root_type_map = {
**{r.lower(): r for r in self.allowed_roots},
**{t.lower(): v for t, v in (type_map or {}).items()}
}
root_role_map = {
**{k: v for k, v in self.marc_roles.items()},
**{k.lower(): v.lower() for k, v in (role_map or {}).items()}
}
class RootParser(MODSCreativeWork):
default_type = emitted_type.lower()
type_map = root_type_map
role_map = root_role_map
return RootParser
def do_transform(self, datum, approved_sets=None, blocked_sets=None, **kwargs):
if not oai_allowed_by_sets(datum, blocked_sets, approved_sets):
return (None, None)
return super().do_transform(datum, **kwargs)
def unwrap_data(self, data, namespaces=None, **kwargs):
unwrapped_data = xmltodict.parse(data, process_namespaces=True, namespaces=(namespaces or self.NAMESPACES))
return {
**unwrapped_data['record'].get('metadata', {}).get('mods:mods', {}),
'header': unwrapped_data['record']['header'],
}
| apache-2.0 |
INCF/BIDS2ISATab | setup.py | 1 | 2176 | from setuptools import setup
import os
here = os.path.abspath(os.path.dirname(__file__))
setup(
name="BIDS2ISATab",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1.0',
description="Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure "
"compatible dataset.",
long_description="Command line tool generating ISA-Tab compatible description from a Brain Imaging Data Structure "
"compatible dataset.",
# The project URL.
url='https://github.com/INCF/BIDS2ISATab',
# Choose your license
license='BSD',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='bids isatab',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=["bids2isatab"],
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ["future",
"pandas",
'nibabel'],
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'bids2isatab=bids2isatab.main:main',
],
},
)
| apache-2.0 |
gfyoung/scipy | benchmarks/benchmarks/special.py | 33 | 1670 | from __future__ import division, absolute_import, print_function
import numpy as np
try:
from scipy.special import ai_zeros, bi_zeros, erf, expn
except ImportError:
pass
try:
# wasn't always in scipy.special, so import separately
from scipy.special import comb
except ImportError:
pass
try:
from scipy.special import loggamma
except ImportError:
pass
from .common import Benchmark, with_attributes
class Airy(Benchmark):
def time_ai_zeros(self):
ai_zeros(100000)
def time_bi_zeros(self):
bi_zeros(100000)
class Erf(Benchmark):
def setup(self, *args):
        self.rand = np.random.rand(int(1e5))
def time_real(self, offset):
erf(self.rand + offset)
time_real.params = [0.0, 2.0]
time_real.param_names = ['offset']
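    # asv reads the params/param_names attributes as a benchmark grid, so
    # time_real is timed once per offset value listed above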
class Comb(Benchmark):
def setup(self, *args):
self.N = np.arange(1, 1000, 50)
self.k = np.arange(1, 1000, 50)
@with_attributes(params=[(10, 100, 1000, 10000), (1, 10, 100)],
param_names=['N', 'k'])
def time_comb_exact(self, N, k):
comb(N, k, exact=True)
def time_comb_float(self):
comb(self.N[:,None], self.k[None,:])
class Loggamma(Benchmark):
def setup(self):
x, y = np.logspace(3, 5, 10), np.logspace(3, 5, 10)
x, y = np.meshgrid(x, y)
self.large_z = x + 1j*y
def time_loggamma_asymptotic(self):
loggamma(self.large_z)
class Expn(Benchmark):
def setup(self):
n, x = np.arange(50, 500), np.logspace(0, 20, 100)
n, x = np.meshgrid(n, x)
self.n, self.x = n, x
def time_expn_large_n(self):
expn(self.n, self.x)
| bsd-3-clause |
Frezzle/xen-api | scripts/import-boxgrinder.py | 25 | 9294 | #!/usr/bin/env python
#
# Copyright (C) Citrix Inc
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Import appliances generated by boxgrinder into XenServer/XCP
import os, sys, time, socket, traceback, syslog
log_f = os.fdopen(os.dup(sys.stdout.fileno()), "a")
pid = None
use_syslog = False
def reopenlog(log_file):
global log_f
if log_f:
log_f.close()
if log_file and log_file <> "stdout:":
        log_f = open(log_file, "a")
elif log_file and log_file == "stdout:":
        log_f = os.fdopen(os.dup(sys.stdout.fileno()), "a")
def log(txt):
global log_f, pid, use_syslog
if use_syslog:
syslog.syslog(txt)
return
if not pid:
pid = os.getpid()
t = time.strftime("%Y%m%dT%H:%M:%SZ", time.gmtime())
print >>log_f, "%s [%d] %s" % (t, pid, txt)
log_f.flush()
# For reference, here's what the boxgrinder default output XML looks like
# Is there a definition somewhere?
example = """
<image>
<name>centos-base</name>
<domain>
<boot type='hvm'>
<guest>
<arch>x86_64</arch>
</guest>
<os>
<loader dev='hd'/>
</os>
<drive disk='centos-base-sda.raw' target='hda'/>
</boot>
<devices>
<vcpu>1</vcpu>
<memory>262144</memory>
<interface/>
<graphics/>
</devices>
</domain>
<storage>
<disk file='centos-base-sda.raw' use='system' format='raw'/>
</storage>
</image>
"""
import xmlrpclib
class XCPError(Exception):
def __init__(self, result):
self.result = result
def __str__(self):
# {'Status': 'Failure', 'ErrorDescription': ['SESSION_AUTHENTICATION_FAILED', 'a', 'Authentication failure']}
return " ".join(self.result["ErrorDescription"])
class Failure(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return self.reason
def value(x):
if "Value" in x:
return x["Value"]
else:
raise XCPError(x)
# We base our VMs off this generic HVM template
base_template = "Other install media"
import xml.dom.minidom
import sys
# Creates the VM, VBDs and VDIs
def import_metadata(server, session, filename):
doc = xml.dom.minidom.parse(filename)
def getSingleElement(doc, name):
elements = doc.getElementsByTagName(name)
if len(elements) <> 1:
raise Failure("Expecting exactly one <%s> element" % name)
return elements[0]
image = getSingleElement(doc, "image")
domain = getSingleElement(image, "domain")
boot = getSingleElement(domain, "boot")
devices = getSingleElement(domain, "devices")
storage = getSingleElement(image, "storage")
def getText(doc, name):
nodes = doc.getElementsByTagName(name)
if len(nodes) <> 1:
print >>sys.stderr, "Expecting exactly one %s tag" % name
sys.exit(1)
result = ""
for child in nodes[0].childNodes:
if child.nodeType == child.TEXT_NODE:
result = result + child.data
return result
def getAttr(doc, name):
for (n, value) in doc.attributes.items():
if name == n:
return value
return ""
# Clone the "Other install media" template and inherit basic
# properties from it.
templates = value(server.VM.get_by_name_label(session, base_template))
if len(templates) <> 1:
raise Failure("Expecting exactly one \"%s\" template" % base_template)
template = templates[0]
name = getText(image, "name")
log("Cloning template %s into %s" % (base_template, name))
vm = value(server.VM.clone(session, template, name))
value(server.VM.set_is_a_template(session, vm, False))
vcpu = getText(devices, "vcpu")
if vcpu <> "":
log("Setting number of vCPUs to: %s" % vcpu)
value(server.VM.set_VCPUs_max(session, vm, vcpu))
value(server.VM.set_VCPUs_at_startup(session, vm, vcpu))
memory = getText(devices, "memory") # KiB
if memory <> "":
log("Setting memory to %s KiB" % memory)
bytes = str(long(memory) * 1024L)
value(server.VM.set_memory_limits(session, vm, "0", bytes, bytes, bytes))
boot_type = getAttr(boot, "type")
if boot_type == "hvm":
log("VM is set to HVM boot by default")
else:
log("Ignoring unknown boot type: %s" % boot_type)
# Disks
disks = storage.getElementsByTagName("disk")
drives = boot.getElementsByTagName("drive")
pool = value(server.pool.get_all(session))[0]
sr = value(server.pool.get_default_SR(session, pool))
try:
log("Will create disks in the default SR: %s" % (value(server.SR.get_name_label(session, sr))))
except Exception, e:
log("Caught %s" % str(e))
raise Failure("Default SR is not set on the pool (%s)" % sr)
vdis = {}
for disk in disks:
ty = getAttr(disk, "format")
if ty <> "raw":
raise Failure("Expected all disks to have format = raw")
filename = getAttr(disk, "file")
size = os.path.getsize(filename)
_type = "user"
if getAttr(disk, "use") == "system":
_type = "system"
vdi_info = {
"name_label": filename,
"name_description": "",
"SR": sr,
"virtual_size": str(size),
"type": _type,
"sharable": False,
"read_only": False,
"other_config": {},
}
vdi = value(server.VDI.create(session, vdi_info))
log("Created VDI %s for %s" % (vdi, filename))
vdis[filename] = vdi
for drive in drives:
disk = getAttr(drive, "disk")
target = getAttr(drive, "target")
vdi = vdis[disk]
bootable = drive == drives[0]
vbd_info = {
"VM": vm,
"VDI": vdi,
"userdevice": target,
"bootable": bootable,
"mode": "RW",
"type": "Disk",
"empty": False,
"other_config": { "owner": "true" },
"qos_algorithm_type": "",
"qos_algorithm_params": {},
}
vbd = value(server.VBD.create(session, vbd_info))
log("Created VBD %s for %s" % (vbd, disk))
return (vm, vdis)
CURL = "/usr/bin/curl"
if not(os.path.exists(CURL)):
raise Failure("%s doesn't exist" % CURL)
import commands
def import_vdi(url, session, vdi, filename):
cmd = "%s -T%s %s/import_raw_vdi?session_id=%s\&vdi=%s" % (CURL, filename, url, session, vdi)
log("%s" % cmd)
(code, output) = commands.getstatusoutput(cmd)
if code <> 0:
log("Disk upload failed: %s" % output)
raise Failure("disk upload failed")
if __name__ == "__main__":
from optparse import OptionParser
settings = {
"log": "stdout:",
"server": "http://127.0.0.1",
"username": "root",
"password": "",
}
log("settings = %s" % repr(settings))
parser = OptionParser(usage="usage: %prog [options] filename.xml")
parser.add_option("-l", "--log", dest="logfile", help="log to LOG", metavar="LOG")
parser.add_option("-s", "--server", dest="server", help="connect to SERVER", metavar="SERVER")
parser.add_option("-u", "--username", dest="username", help="login as USERNAME", metavar="USERNAME")
parser.add_option("-p", "--password", dest="password", help="use password PASSWORD", metavar="PASSWORD")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("wrong number of arguments")
options = options.__dict__
for setting in settings:
if setting in options and options[setting]:
settings[setting] = options[setting]
s = repr(settings[setting])
if setting == "password":
s = "*REDACTED*"
log("option settings[%s] <- %s" % (setting, s))
if settings["log"] == "syslog:":
use_syslog = True
reopenlog(None)
elif settings["log"] == "stdout:":
use_syslog = False
reopenlog("stdout:")
else:
use_syslog = False
reopenlog(settings["log"])
server = xmlrpclib.Server(settings["server"])
session = value(server.session.login_with_password(settings["username"], settings["password"], "1.0", "xen-api-scripts-import-boxgrinder"))
try:
(vm, vdis) = import_metadata(server, session, args[0])
for filename in vdis.keys():
import_vdi(settings["server"], session, vdis[filename], filename)
log("VM import complete")
log("%s" % vm)
finally:
value(server.session.logout(session))
| lgpl-2.1 |
ikcalB/linuxcnc-mirror | lib/python/gladevcp/hal_gremlin.py | 5 | 11710 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Pavel Shramov <shramov@mexmat.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# 2014 Steffen Noack
# add property 'mouse_btn_mode'
# 0 = default: left rotate, middle move, right zoom
# 1 = left zoom, middle move, right rotate
# 2 = left move, middle rotate, right zoom
# 3 = left zoom, middle rotate, right move
# 4 = left move, middle zoom, right rotate
# 5 = left rotate, middle zoom, right move
# 2015 Moses McKnight introduced mode 6
# 6 = left move, middle zoom, right zoom (no rotate - for 2D plasma machines or lathes)
import os
import gtk, gobject
import linuxcnc
import gremlin
import rs274.glcanon
import gcode
from hal_actions import _EMC_ActionBase
from hal_glib import GStat
class HAL_Gremlin(gremlin.Gremlin, _EMC_ActionBase):
__gtype_name__ = "HAL_Gremlin"
__gsignals__ = {
'line-clicked': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_INT,)),
'gcode_error': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),
}
__gproperties__ = {
'view' : ( gobject.TYPE_STRING, 'View type', 'Default view: p, x, y, y2, z, z2',
'p', gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'enable_dro' : ( gobject.TYPE_BOOLEAN, 'Enable DRO', 'Show DRO on graphics',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'metric_units' : ( gobject.TYPE_BOOLEAN, 'Use Metric Units', 'Show DRO in metric or imperial units',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_relative' : ( gobject.TYPE_BOOLEAN, 'Show Relative', 'Show DRO relative to active system or machine origin',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_commanded' : ( gobject.TYPE_BOOLEAN, 'Show Commanded', 'Show commanded or actual position',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_extents_option' : ( gobject.TYPE_BOOLEAN, 'Show Extents', 'Show machine extents',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_limits' : ( gobject.TYPE_BOOLEAN, 'Show limits', 'Show machine limits',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_live_plot' : ( gobject.TYPE_BOOLEAN, 'Show live plot', 'Show machine plot',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_velocity' : ( gobject.TYPE_BOOLEAN, 'Show tool speed', 'Show tool velocity',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_program' : ( gobject.TYPE_BOOLEAN, 'Show program', 'Show program',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_rapids' : ( gobject.TYPE_BOOLEAN, 'Show rapids', 'Show rapid moves',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_tool' : ( gobject.TYPE_BOOLEAN, 'Show tool', 'Show tool',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_dtg' : ( gobject.TYPE_BOOLEAN, 'Show DTG', 'Show Distance To Go',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'show_lathe_radius' : ( gobject.TYPE_BOOLEAN, 'Show Lathe Radius', 'Show X axis in Radius',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'grid_size' : ( gobject.TYPE_FLOAT, 'Grid Size', 'Grid Size',
0, 100, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_joints_mode' : ( gobject.TYPE_BOOLEAN, 'Use joints mode', 'Use joints mode',
False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'use_default_controls' : ( gobject.TYPE_BOOLEAN, 'Use Default Mouse Controls', 'Use Default Mouse Controls',
True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
'mouse_btn_mode' : ( gobject.TYPE_INT, 'Mouse Button Mode',
('Mousebutton assignment, l means left, m middle, r right \n'
'0 = default: l-rotate, m-move, r-zoom \n'
'1 = l-zoom, m-move, r-rotate\n'
'2 = l-move, m-rotate, r-zoom\n'
'3 = l-zoom, m-rotate, r-move\n'
'4 = l-move, m-zoom, r-rotate\n'
'5 = l-rotate, m-zoom, r-move\n'
'6 = l-move, m-zoom, r-zoom'),
0, 6, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
}
__gproperties = __gproperties__
def __init__(self, *a, **kw):
gobject.GObject.__init__(self)
inifile = os.environ.get('INI_FILE_NAME', '/dev/null')
inifile = linuxcnc.ini(inifile)
gremlin.Gremlin.__init__(self, inifile)
self._reload_filename = None
self.gstat = GStat()
self.gstat.connect('file-loaded', self.fileloaded)
self.gstat.connect('reload-display', self.reloadfile)
self.show()
def reloadfile(self,w):
try:
self.fileloaded(None,self._reload_filename)
except:
pass
def fileloaded(self,w,f):
self._reload_filename=f
try:
self._load(f)
except AttributeError,detail:
#AttributeError: 'NoneType' object has no attribute 'gl_end'
print 'hal_gremlin: continuing after',detail
def do_get_property(self, property):
name = property.name.replace('-', '_')
if name == 'view':
return self.current_view
elif name in self.__gproperties.keys():
return getattr(self, name)
else:
raise AttributeError('unknown property %s' % property.name)
def do_set_property(self, property, value):
name = property.name.replace('-', '_')
if name == 'view':
view = value.lower()
if self.lathe_option:
if view not in ['p','y','y2']:
return False
elif view not in ['p', 'x', 'y', 'z', 'z2']:
return False
self.current_view = view
if self.initialised:
self.set_current_view()
elif name == 'enable_dro':
self.enable_dro = value
elif name == 'metric_units':
self.metric_units = value
elif name in self.__gproperties.keys():
setattr(self, name, value)
else:
raise AttributeError('unknown property %s' % property.name)
self.queue_draw()
return True
# This overrides glcannon.py method so we can change the DRO
def dro_format(self,s,spd,dtg,limit,homed,positions,axisdtg,g5x_offset,g92_offset,tlo_offset):
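        # Builds the graphics overlay strings: returns (limit, homed, posstrs,
        # droposstrs), honouring the metric_units / show_dtg / lathe properties.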
if not self.enable_dro:
return limit, homed, [''], ['']
if self.metric_units:
format = "% 6s:% 9.3f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.3f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.3f G92 %1s:% 9.3f"
rotformat = "% 5s %1s:% 9.3f"
else:
format = "% 6s:% 9.4f"
if self.show_dtg:
droformat = " " + format + " DTG %1s:% 9.4f"
else:
droformat = " " + format
offsetformat = "% 5s %1s:% 9.4f G92 %1s:% 9.4f"
rotformat = "% 5s %1s:% 9.4f"
diaformat = " " + format
posstrs = []
droposstrs = []
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
posstrs.append(format % (a, positions[i]))
if self.show_dtg:
droposstrs.append(droformat % (a, positions[i], a, axisdtg[i]))
else:
droposstrs.append(droformat % (a, positions[i]))
droposstrs.append("")
for i in range(9):
index = s.g5x_index
if index<7:
label = "G5%d" % (index+3)
else:
label = "G59.%d" % (index-6)
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(offsetformat % (label, a, g5x_offset[i], a, g92_offset[i]))
droposstrs.append(rotformat % (label, 'R', s.rotation_xy))
droposstrs.append("")
for i in range(9):
a = "XYZABCUVW"[i]
if s.axis_mask & (1<<i):
droposstrs.append(rotformat % ("TLO", a, tlo_offset[i]))
# if its a lathe only show radius or diameter as per property
if self.is_lathe():
posstrs[0] = ""
if self.show_lathe_radius:
posstrs.insert(1, format % ("Rad", positions[0]))
else:
posstrs.insert(1, format % ("Dia", positions[0]*2.0))
droposstrs[0] = ""
if self.show_dtg:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0], "R", axisdtg[0]))
else:
droposstrs.insert(1, droformat % ("Dia", positions[0]*2.0, "D", axisdtg[0]*2.0))
else:
if self.show_lathe_radius:
droposstrs.insert(1, droformat % ("Rad", positions[0]))
else:
droposstrs.insert(1, diaformat % ("Dia", positions[0]*2.0))
if self.show_velocity:
posstrs.append(format % ("Vel", spd))
            pos = 0
            for i in range(9):
                if s.axis_mask & (1<<i): pos += 1
            if self.is_lathe():
                pos += 1
droposstrs.insert(pos, " " + format % ("Vel", spd))
if self.show_dtg:
posstrs.append(format % ("DTG", dtg))
return limit, homed, posstrs, droposstrs
# Override gremlin's / glcannon.py function so we can emit a GObject signal
def update_highlight_variable(self,line):
self.highlight_line = line
if line == None:
line = -1
self.emit('line-clicked', line)
def realize(self, widget):
gremlin.Gremlin.realize(self, widget)
@rs274.glcanon.with_context
def _load(self, filename):
return self.load(filename)
def report_gcode_error(self, result, seq, filename):
error_str = gcode.strerror(result)
errortext = "G-Code error in " + os.path.basename(filename) + "\n" + "Near line " \
+ str(seq) + " of\n" + filename + "\n" + error_str + "\n"
print(errortext)
self.emit("gcode-error", errortext)
| lgpl-2.1 |
bowlofstew/Impala | thirdparty/hive-1.1.0-cdh5.7.0-SNAPSHOT/lib/py/hive_metastore/constants.py | 45 | 1159 | #
# Autogenerated by Thrift Compiler (0.9.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
DDL_TIME = "transient_lastDdlTime"
HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__"
HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__"
HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__"
IS_ARCHIVED = "is_archived"
ORIGINAL_LOCATION = "original_location"
IS_IMMUTABLE = "immutable"
META_TABLE_COLUMNS = "columns"
META_TABLE_COLUMN_TYPES = "columns.types"
BUCKET_FIELD_NAME = "bucket_field_name"
BUCKET_COUNT = "bucket_count"
FIELD_TO_DIMENSION = "field_to_dimension"
META_TABLE_NAME = "name"
META_TABLE_DB = "db"
META_TABLE_LOCATION = "location"
META_TABLE_SERDE = "serde"
META_TABLE_PARTITION_COLUMNS = "partition_columns"
META_TABLE_PARTITION_COLUMN_TYPES = "partition_columns.types"
FILE_INPUT_FORMAT = "file.inputformat"
FILE_OUTPUT_FORMAT = "file.outputformat"
META_TABLE_STORAGE = "storage_handler"
TABLE_IS_TRANSACTIONAL = "transactional"
TABLE_NO_AUTO_COMPACT = "no_auto_compaction"
| apache-2.0 |
dablak/boto | boto/mashups/interactive.py | 119 | 2737 | # Copyright (C) 2003-2007 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import socket
import sys
# windows does not have termios...
try:
import termios
import tty
has_termios = True
except ImportError:
has_termios = False
def interactive_shell(chan):
if has_termios:
posix_shell(chan)
else:
windows_shell(chan)
def posix_shell(chan):
import select
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
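        # raw + cbreak mode: keystrokes are forwarded to the channel one byte
        # at a time instead of being line-buffered by the local terminal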
chan.settimeout(0.0)
while True:
r, w, e = select.select([chan, sys.stdin], [], [])
if chan in r:
try:
x = chan.recv(1024)
if len(x) == 0:
print '\r\n*** EOF\r\n',
break
sys.stdout.write(x)
sys.stdout.flush()
except socket.timeout:
pass
if sys.stdin in r:
x = sys.stdin.read(1)
if len(x) == 0:
break
chan.send(x)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# thanks to Mike Looijmans for this code
def windows_shell(chan):
import threading
sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")
def writeall(sock):
while True:
data = sock.recv(256)
if not data:
sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
sys.stdout.flush()
break
sys.stdout.write(data)
sys.stdout.flush()
writer = threading.Thread(target=writeall, args=(chan,))
writer.start()
try:
while True:
d = sys.stdin.read(1)
if not d:
break
chan.send(d)
except EOFError:
# user hit ^Z or F6
pass
| mit |
lkundrak/scraperwiki | web/api/viewshandlers.py | 1 | 31419 | import urllib
import urllib2
from django.contrib.sites.models import Site
from django.conf import settings
from django.template import RequestContext, loader, Context
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotFound
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from settings import MAX_API_ITEMS, API_URL
from django.views.decorators.http import condition
from tagging.models import Tag
from models import APIMetric
import csv
import datetime
import re
import PyRSS2Gen
from django.utils.encoding import smart_str
from django.core.serializers.json import DateTimeAwareJSONEncoder
from frontend.models import UserProfile
from codewiki.models import Scraper, Code, UserCodeRole, ScraperRunEvent, CodePermission, scraper_search_query, scraper_search_query_unordered, scrapers_overdue
from codewiki.managers.datastore import DataStore
import frontend
from cStringIO import StringIO
try: import json
except ImportError: import simplejson as json
def getscraperorresponse(short_name):
try:
scraper = Code.objects.get(short_name=short_name)
except Code.DoesNotExist:
return None, "Sorry, this scraper does not exist"
# if not scraper.actionauthorized(user, "apidataread"):
# return scraper.authorizationfailedmessage(user, "apidataread").get("body")
return scraper, None
# see http://stackoverflow.com/questions/1189111/unicode-to-utf8-for-csv-files-python-via-xlrd
def stringnot(v):
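    # e.g. stringnot(None) -> '', stringnot(u'caf\xe9') -> 'caf\xc3\xa9'
    # (UTF-8 bytes for the csv writer); numbers pass through unchanged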
if v == None:
return ""
if type(v) in [unicode, str]:
return v.encode("utf-8")
return v
def stream_rows(dataproxy, format):
n = 0
while True:
line = dataproxy.receiveonelinenj()
try:
ret = json.loads(line)
except ValueError, e:
yield str(e)
break
if "error" in ret:
yield str(ret)
break
fout = StringIO()
# csv and json numerical values are typed, but not htmltable numerics
if format == "csv":
writer = csv.writer(fout, dialect='excel')
if n == 0:
writer.writerow([ k.encode('utf-8') for k in ret["keys"] ])
for row in ret["data"]:
writer.writerow([ stringnot(v) for v in row ])
elif format == "htmltable":
if n == 0:
# there seems to be an 8px margin imposed on the body tag when delivering a page that has no <body> tag
fout.write('<table border="1" style="border-collapse:collapse; ">\n')
fout.write("<tr> <th>%s</th> </tr>\n" % ("</th> <th>".join([ k.encode('utf-8') for k in ret["keys"] ])))
for row in ret["data"]:
fout.write("<tr> <td>%s</td> </tr>\n" % ("</td> <td>".join([ str(stringnot(v)).replace("<", "<") for v in row ])))
else:
assert False, "Bad format "+format
yield fout.getvalue()
n += 1
if not ret.get("moredata"):
if format == "htmltable":
yield "</table>\n"
break
# formats that should be easy to stream because they are line based
# may also work for jsondict if we close the bracket ourselves
def out_csvhtml(dataproxy, short_name, format):
strea = stream_rows(dataproxy, format)
if format == "csv":
mimetype = 'text/csv; charset=utf-8'
else:
mimetype = 'text/html; charset=utf-8'
response = HttpResponse(mimetype=mimetype) # used to take strea
#response = HttpResponse(strea, mimetype='text/csv') # when streamchunking was tried
if format == "csv":
response['Content-Disposition'] = 'attachment; filename=%s.csv' % (short_name)
for s in strea:
response.write(s)
dataproxy.close()
# unless you put in a content length, the middleware will measure the length of your data
# (unhelpfully consuming everything in your generator) before then returning a zero length result
#response["Content-Length"] = 1000000000
return response
# TODO: Fix this so that we can stream the results to either the browser
# or the download. Currently this dies with a timeout on large data,
# ~38k rows (depending on the query), so the user gets nothing; perhaps we
# should iterate over the results as they arrive and encode the stream
# incrementally, row by row.
def out_json(dataproxy, callback, short_name, format):
# json is not chunked. The output is of finite fixed bite sizes because
# it is generally used by browsers which aren't going to survive a huge
# download; however could chunk the jsondict type stream_wise as above
# by manually creating the outer bracketing as with htmltable.
result = dataproxy.receiveonelinenj() # no streaming rows because streamchunking value was not set
if not result:
dataproxy.close()
return HttpResponse("Error: Dataproxy responded with an invalid response")
if format == "jsondict":
try:
res = json.loads(result)
while res.get('stillproducing') == 'yes':
dresult = json.loads(dataproxy.receiveonelinenj())
res['data'].extend(dresult['data'])
res['stillproducing'] = dresult.get('stillproducing')
except ValueError, e:
dataproxy.close()
return HttpResponse("Error: %s" % (e.message,))
if "error" not in res:
dictlist = [ dict(zip(res["keys"], values)) for values in res["data"] ]
result = json.dumps(dictlist, cls=DateTimeAwareJSONEncoder, indent=4)
else:
assert format == "jsonlist"
if callback:
result = "%s(%s)" % (callback, result)
response = HttpResponse(result, mimetype='application/json; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=%s.json' % (short_name)
dataproxy.close()
return response
def out_rss2(dataproxy, scraper):
result = dataproxy.receiveonelinenj() # no streaming rows because streamchunking value was not set
try:
res = json.loads(result)
except ValueError, e:
return HttpResponse("Error:%s" % (e.message,))
if "error" in res:
return HttpResponse("Error2: %s" % res["error"])
keymatches = { }
if "guid" not in res["keys"] and "link" in res["keys"]:
keymatches["guid"] = "link"
if "pubDate" not in res["keys"] and "date" in res["keys"]:
keymatches["pubDate"] = "date"
rsskeys = ["title", "link", "description", "guid", "pubDate"]
missingkeys = [ key for key in rsskeys if key not in res["keys"] and key not in keymatches ]
if missingkeys:
return HttpResponse("Error3: You are missing the following keys in the table: %s" % str(missingkeys))
items = [ ]
for value in res["data"]:
ddata = dict(zip(res["keys"], value))
# usual datetime conversion mess!
spubDate = re.findall("\d+", ddata[keymatches.get("pubDate", "pubDate")])
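        # e.g. (illustrative) '2011-03-02T10:15:30' -> ['2011', '03', '02', '10', '15', '30']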
try:
pubDate = datetime.datetime(*map(int, spubDate[:6]))
except Exception, e:
return HttpResponse("Date conversion error: %s\n%s" % (str(e), str(ddata)))
guid = PyRSS2Gen.Guid(ddata[keymatches.get("guid", "guid")])
rssitem = PyRSS2Gen.RSSItem(title=ddata["title"], link=ddata["link"], description=ddata["description"], guid=guid, pubDate=pubDate)
items.append(rssitem)
current_site = Site.objects.get_current()
link = reverse('code_overview', args=[scraper.wiki_type, scraper.short_name])
link = 'https://%s%s' % (current_site.domain,link,)
rss = PyRSS2Gen.RSS2(title=scraper.title, link=link, description=scraper.description_safepart(), lastBuildDate=datetime.datetime.now(), items=items)
fout = StringIO()
rss.write_xml(fout)
return HttpResponse(fout.getvalue(), mimetype='application/rss+xml; charset=utf-8')
# ***Streamchunking could all be working were it not for the inability to set
# the Content-Length: inexact values give errors in apache, so it would be
# handy if it had a setting that organized some chunking instead.
# see http://stackoverflow.com/questions/2922874/how-to-stream-an-httpresponse-with-django
# Setting the Content-Length to -1 (to stop the middleware consuming the
# generator in order to measure it) causes an error in the apache server;
# the same happens with a too-long content length.
# Should consider giving transfer-coding: chunked,
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6
# Streaming is only happening from the dataproxy into here. Streaming
# from here out through django is nearly impossible as we don't know
# the length of the output file if we incrementally build the csv output;
# the generator code has therefore been undone,
# all for want of setting response["Content-Length"] to the correct value.
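# Query parameters handled below: name (scraper short_name), query (SQL),
# format (jsondict | jsonlist | csv | htmltable | rss2), apikey, attach
# (semicolon-separated "name,asname" pairs) and callback (JSONP). A csv
# request therefore looks something like (hypothetical URL prefix):
#   .../sqlite?name=my_scraper&query=select+*+from+swdata&format=csv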
@condition(etag_func=None)
def sqlite_handler(request):
short_name = request.GET.get('name')
apikey = request.GET.get('apikey', None)
scraper,err = getscraperorresponse(short_name)
if err:
result = json.dumps({'error':err, "short_name":short_name})
if request.GET.get("callback"):
result = "%s(%s)" % (request.GET.get("callback"), result)
return HttpResponse(result)
u,s,kd = None, None, ""
if request.user.is_authenticated():
u = request.user
if scraper.privacy_status != "private":
s = scraper # XX why this only when not private? FAI
kd = short_name
else:
# When private we MUST have an apikey and it should match
if not scraper.api_actionauthorized(apikey):
result = json.dumps({'error':"Invalid API Key", "short_name":short_name})
if request.GET.get("callback"):
result = "%s(%s)" % (request.GET.get("callback"), result)
return HttpResponse(result)
APIMetric.record( "sqlite", key_data=kd, user=u, code_object=s )
dataproxy = DataStore(request.GET.get('name'))
lattachlist = request.GET.get('attach', '').split(";")
attachlist = [ ]
for aattach in lattachlist:
if aattach:
aa = aattach.split(",")
attachi = {"name":aa[0], "asname":(len(aa) == 2 and aa[1] or None)}
attachlist.append(attachi)
dataproxy.request({"maincommand":"sqlitecommand", "command":"attach", "name":attachi["name"], "asname":attachi["asname"]})
sqlquery = request.GET.get('query', "")
format = request.GET.get("format", "json")
if format == "json":
format = "jsondict"
req = {"maincommand":"sqliteexecute", "sqlquery":sqlquery, "data":None, "attachlist":attachlist}
if format == "csv":
req["streamchunking"] = 1000
# This is inlined from the dataproxy.request() function to allow for
# receiveoneline to perform multiple readlines in this case.
# (this is the stream-chunking thing. the right interface is not yet
# apparent)
dataproxy.m_socket.sendall(json.dumps(req) + '\n')
if format not in ["jsondict", "jsonlist", "csv", "htmltable", "rss2"]:
dataproxy.close()
return HttpResponse("Error: the format '%s' is not supported" % format)
if format in ["csv", 'htmltable']:
return out_csvhtml(dataproxy, scraper.short_name, format)
if format == "rss2":
return out_rss2(dataproxy, scraper)
return out_json(dataproxy, request.GET.get("callback"),
scraper.short_name, format)
def scraper_search_handler(request):
apikey = request.GET.get('apikey', None)
query = request.GET.get('query')
if not query:
query = request.GET.get('searchquery')
try:
maxrows = int(request.GET.get('maxrows', ""))
except ValueError:
maxrows = 5
result = [ ] # list of dicts
boverduescraperrequest = False
if query == "*OVERDUE*":
# We should check apikey against our shared secret. If it matches then it should
# be allowed to continue.
if request.META.get("HTTP_X_REAL_IP", "Not specified") in settings.INTERNAL_IPS:
boverduescraperrequest = True
if settings.INTERNAL_IPS == ["IGNORETHIS_IPS_CONSTRAINT"] or '127.0.0.1' in settings.INTERNAL_IPS:
boverduescraperrequest = True
else:
u = None
if request.user.is_authenticated():
u = request.user
APIMetric.record( "scrapersearch", key_data=query, user=u, code_object=None )
    # TODO: If the user has specified an API key then we should pass it into
    # the search query and refine the result set to show only valid scrapers
if boverduescraperrequest:
scrapers_all = scrapers_overdue()
else:
scrapers_all = scraper_search_query_unordered(user=None, query=query, apikey=apikey)
# scrapers we don't want to be returned in the search
nolist = request.GET.get("nolist", "").split()
quietfields = request.GET.get('quietfields', "").split("|")
#offset = request.GET.get('offset', 0)
srequestinguser = request.GET.get("requestinguser", "")
lrequestinguser = User.objects.filter(username=srequestinguser)
if lrequestinguser:
requestinguser = lrequestinguser[0]
else:
requestinguser = None
# convert the query into an ordered list
if boverduescraperrequest:
scraperlist = scrapers_all
    # there is probably a way of doing this ranking on the ownership fields
    # directly in the database
elif requestinguser:
scraperlist = list(scrapers_all.distinct())
for scraper in scraperlist:
usercoderoles = UserCodeRole.objects.filter(code=scraper, user=requestinguser)
if usercoderoles:
if usercoderoles[0].role == "owner":
scraper.colleaguescore = (3, scraper.short_name) # created_at
elif usercoderoles[0].role == "editor":
scraper.colleaguescore = (2, scraper.short_name) # created_at
else:
scraper.colleaguescore = (1, scraper.short_name) # created_at
else:
scraper.colleaguescore = (0, scraper.short_name) # created_at
        scraperlist.sort(key=lambda scraper: scraper.colleaguescore, reverse=True)
else:
scrapers_all = scrapers_all.order_by('-created_at')
scraperlist = scrapers_all.distinct()[:(maxrows+len(nolist))]
for scraper in scraperlist:
if scraper.short_name in nolist:
continue
res = {'short_name':scraper.short_name }
res['title'] = scraper.title
owners = scraper.userrolemap()["owner"]
if owners:
owner = owners[0]
try:
profile = owner.get_profile()
ownername = profile.name
if boverduescraperrequest:
res['beta_user'] = profile.beta_user # to enable certain scrapers to go through the lxc process
except frontend.models.UserProfile.DoesNotExist:
ownername = owner.username
if not ownername:
ownername = owner.username
if ownername:
res['title'] = "%s / %s" % (ownername, scraper.title)
if 'description' not in quietfields:
res['description'] = scraper.description_safepart()
res['created'] = scraper.created_at.isoformat()
res['privacy_status'] = scraper.privacy_status
res['language'] = scraper.language
# extra data added to the overdue request kind so that twister has everything it needs to get on with it
# and doesn't need to call back for further information
if boverduescraperrequest:
res['overdue_proportion'] = float(scraper.overdue_proportion)
vcsstatus = scraper.get_vcs_status(-1)
res['code'] = vcsstatus.get("code", "#Code not previously saved")
res["rev"] = vcsstatus.get("prevcommit", {}).get("rev", -1)
res['guid'] = scraper.guid
res["attachables"] = [ ascraper.short_name for ascraper in scraper.attachable_scraperdatabases() ]
res["envvars"] = scraper.description_envvars()
result.append(res)
if len(result) > maxrows:
break
if request.GET.get("format") == "csv":
fout = StringIO()
writer = csv.writer(fout, dialect='excel')
headers = [ 'short_name', 'title', 'description', 'created', 'privacy_status' ]
writer.writerow(headers)
for r in result:
writer.writerow([r[header] for header in headers])
response = HttpResponse(fout.getvalue(), mimetype='text/csv')
response['Content-Disposition'] = 'attachment; filename=search.csv'
return response
res = json.dumps(result, indent=4)
callback = request.GET.get("callback")
if callback:
res = "%s(%s)" % (callback, res)
response = HttpResponse(res, mimetype='application/json; charset=utf-8')
#response['Content-Disposition'] = 'attachment; filename=search.json'
return response
def usersearch_handler(request):
query = request.GET.get('searchquery')
try:
maxrows = int(request.GET.get('maxrows', ""))
except ValueError:
maxrows = 5
u = None
if request.user.is_authenticated():
u = request.user
APIMetric.record( "usersearch", key_data=query, user=u, code_object=None )
# usernames we don't want to be returned in the search
nolist = request.GET.get("nolist", "").split()
srequestinguser = request.GET.get("requestinguser", "")
lrequestinguser = User.objects.filter(username=srequestinguser)
if lrequestinguser:
requestinguser = lrequestinguser[0]
else:
requestinguser = None
if query:
users = User.objects.filter(username__icontains=query)
userprofiles = User.objects.filter(userprofile__name__icontains=query)
users_all = users | userprofiles
else:
users_all = User.objects.all()
users_all = users_all.order_by('username')
# if there is a requestinguser, then rank by overlaps and sort
# (inefficient, but I got no other ideas right now)
# (could be doing something with scraper.userrolemap())
if requestinguser:
requestuserscraperset = set([usercoderole.code.short_name for usercoderole in requestinguser.usercoderole_set.all()])
userlist = list(users_all)
for user in userlist:
user.colleaguescore = len(requestuserscraperset.intersection([usercoderole.code.short_name for usercoderole in user.usercoderole_set.all()]))
userlist.sort(key=lambda user:user.colleaguescore, reverse=True)
#for user in userlist:
# print (user, user.colleaguescore)
else:
userlist = users_all[:(maxrows+len(nolist))]
result = [ ]
for user in userlist:
if user.username not in nolist:
res = {'username':user.username, "profilename":user.get_profile().name, "date_joined":user.date_joined.isoformat() }
result.append(res)
if len(result) > maxrows:
break
res = json.dumps(result, indent=4)
callback = request.GET.get("callback")
if callback:
res = "%s(%s)" % (callback, res)
response = HttpResponse(res, mimetype='application/json; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=search.json'
return response
def userinfo_handler(request):
username = request.GET.get('username', "")
apikey = request.GET.get('apikey', "")
users = User.objects.filter(username=username)
result = [ ]
for user in users: # list of users is normally 1
info = { "username":user.username, "profilename":user.get_profile().name }
info["datejoined"] = user.date_joined.isoformat()
info['coderoles'] = { }
for ucrole in user.usercoderole_set.exclude(code__privacy_status="deleted"):
if ucrole.code.privacy_status != "private":
if ucrole.role not in info['coderoles']:
info['coderoles'][ucrole.role] = [ ]
info['coderoles'][ucrole.role].append(ucrole.code.short_name)
elif apikey:
try:
api_user = UserProfile.objects.get(apikey=apikey).user
if api_user.usercoderole_set.filter(code__short_name=ucrole.code.short_name):
if ucrole.role not in info['coderoles']:
info['coderoles'][ucrole.role] = [ ]
info['coderoles'][ucrole.role].append(ucrole.code.short_name)
except UserProfile.DoesNotExist:
pass
result.append(info)
u = None
if request.user.is_authenticated():
u = request.user
APIMetric.record( "getuserinfo", key_data=username, user=u, code_object=None )
res = json.dumps(result, indent=4)
callback = request.GET.get("callback")
if callback:
res = "%s(%s)" % (callback, res)
response = HttpResponse(res, mimetype='application/json; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=userinfo.json'
return response
def runevent_handler(request):
apikey = request.GET.get('apikey', None)
short_name = request.GET.get('name')
scraper,err = getscraperorresponse(short_name)
if err:
result = json.dumps({'error':err, "short_name":short_name})
if request.GET.get("callback"):
result = "%s(%s)" % (request.GET.get("callback"), result)
return HttpResponse(result)
kd = scraper.short_name
s = scraper
# Check accessibility if this scraper is private using
# apikey
if not scraper.api_actionauthorized(apikey):
result = json.dumps({'error':"Invalid API Key", "short_name":short_name})
if request.GET.get("callback"):
result = "%s(%s)" % (request.GET.get("callback"), result)
return HttpResponse(result)
if scraper.privacy_status == 'private': # XXX not sure why we do this, do metrics not work with private? FAI
kd,s = '', None
u = None
if request.user.is_authenticated():
u = request.user
APIMetric.record( "runeventinfo", key_data=kd, user=u, code_object=s )
runid = request.GET.get('runid', '-1')
runevent = None
if scraper.wiki_type != "view":
# negative index counts back from the most recent run
if runid[0] == '-':
try:
i = -int(runid) - 1
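                # e.g. runid '-1' -> i == 0 (most recent run),
                # runid '-2' -> i == 1 (second most recent)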
runevents = scraper.scraper.scraperrunevent_set.all().order_by('-run_started')
if i < len(runevents):
runevent = runevents[i]
except ValueError:
pass
if not runevent:
try:
runevent = scraper.scraper.scraperrunevent_set.get(run_id=runid)
except ScraperRunEvent.DoesNotExist:
pass
if not runevent:
result = json.dumps({'error':"run_event not found", "short_name":short_name})
if request.GET.get("callback"):
result = "%s(%s)" % (request.GET.get("callback"), result)
return HttpResponse(result)
info = { "runid":runevent.run_id, "run_started":runevent.run_started.isoformat(),
"records_produced":runevent.records_produced, "pages_scraped":runevent.pages_scraped }
if runevent.run_ended:
info['run_ended'] = runevent.run_ended.isoformat()
if runevent.exception_message:
info['exception_message'] = runevent.exception_message
info['output'] = runevent.output
if runevent.first_url_scraped:
info['first_url_scraped'] = runevent.first_url_scraped
domainsscraped = [ ]
for domainscrape in runevent.domainscrape_set.all():
domainsscraped.append({'domain':domainscrape.domain, 'bytes':domainscrape.bytes_scraped, 'pages':domainscrape.pages_scraped})
if domainsscraped:
info['domainsscraped'] = domainsscraped
result = [info] # a list with one element
res = json.dumps(result, indent=4)
callback = request.GET.get("callback")
if callback:
res = "%s(%s)" % (callback, res)
response = HttpResponse(res, mimetype='application/json; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=runevent.json'
return response
def convert_history(commitentry):
result = { 'version':commitentry['rev'], 'date':commitentry['date'].isoformat() }
if 'user' in commitentry:
result["user"] = commitentry['user'].username
lsession = commitentry['description'].split('|||')
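    # descriptions are stored as "session|||message"; e.g. an illustrative
    # "editsession42|||saved" yields result['session'] == "editsession42"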
if len(lsession) == 2:
result['session'] = lsession[0]
return result
def convert_run_event(runevent):
result = { "runid":runevent.run_id, "run_started":runevent.run_started.isoformat(),
"records_produced":runevent.records_produced, "pages_scraped":runevent.pages_scraped,
"still_running":(runevent.pid != -1),
}
if runevent.run_ended:
result['last_update'] = runevent.run_ended.isoformat()
if runevent.exception_message:
result['exception_message'] = runevent.exception_message
return result
def convert_date(date_str):
if not date_str:
return None
try:
#return datetime.datetime.strptime(date_str, '%Y-%m-%d')
        return datetime.datetime(*map(int, re.findall(r"\d+", date_str))) # should handle 2011-01-05 21:30:37
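        # worked example: "2011-01-05 21:30:37" -> [2011, 1, 5, 21, 30, 37]
        # -> datetime.datetime(2011, 1, 5, 21, 30, 37)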
except ValueError:
return None
def scraperinfo_handler(request):
result = [ ]
    apikey = request.GET.get('apikey', None)
quietfields = request.GET.get('quietfields', "").split("|")
history_start_date = convert_date(request.GET.get('history_start_date', None))
try:
rev = int(request.GET.get('version', ''))
except ValueError:
rev = None
for short_name in request.GET.get('name', "").split():
scraper,err = getscraperorresponse(short_name)
if err:
result = json.dumps({'error':err, "short_name":short_name})
if request.GET.get("callback"):
result = "%s(%s)" % (request.GET.get("callback"), result)
return HttpResponse(result)
# Check accessibility if this scraper is private using
# apikey
if hasattr(scraper, "privacy_status") and scraper.privacy_status == 'private':
if not scraper.api_actionauthorized(apikey):
scraper = u'Invalid API Key'
if type(scraper) in [str, unicode]:
result.append({'error':scraper, "short_name":short_name})
else:
result.append(scraperinfo(scraper, history_start_date, quietfields, rev))
u = None
if request.user.is_authenticated():
u = request.user
APIMetric.record( "getinfo", key_data=request.GET.get('name', ""), user=u, code_object=None )
res = json.dumps(result, indent=4)
callback = request.GET.get("callback")
if callback:
res = "%s(%s)" % (callback, res)
response = HttpResponse(res, mimetype='application/json; charset=utf-8')
response['Content-Disposition'] = 'attachment; filename=scraperinfo.json'
return response
def scraperinfo(scraper, history_start_date, quietfields, rev):
info = { }
info['short_name'] = scraper.short_name
info['language'] = scraper.language
info['created'] = scraper.created_at.isoformat()
info['title'] = scraper.title
info['description'] = scraper.description_safepart()
info['tags'] = [tag.name for tag in Tag.objects.get_for_object(scraper)]
info['wiki_type'] = scraper.wiki_type
info['privacy_status'] = scraper.privacy_status
if scraper.wiki_type == 'scraper':
info['last_run'] = scraper.scraper.last_run and scraper.scraper.last_run.isoformat() or ''
info['run_interval'] = scraper.scraper.run_interval
attachables = [ ]
for cp in CodePermission.objects.filter(code=scraper).all():
if cp.permitted_object.privacy_status != "deleted":
attachables.append(cp.permitted_object.short_name)
info["attachables"] = attachables
    # these have to filter out the incoming private scraper names
    # (the outgoing attach-to list doesn't, because those are referred to in the code as well)
info["attachable_here"] = [ ]
for cp in CodePermission.objects.filter(permitted_object=scraper).all():
if cp.code.privacy_status not in ["deleted", "private"]:
info["attachable_here"].append(cp.code.short_name)
if scraper.wiki_type == 'scraper':
info['records'] = scraper.scraper.record_count # old style datastore
if 'datasummary' not in quietfields:
dataproxy = DataStore(scraper.short_name)
sqlitedata = dataproxy.request({"maincommand":"sqlitecommand", "command":"datasummary", "val1":0, "val2":None})
if sqlitedata and type(sqlitedata) not in [str, unicode]:
info['datasummary'] = sqlitedata
if 'userroles' not in quietfields:
info['userroles'] = { }
for ucrole in scraper.usercoderole_set.all():
if ucrole.role not in info['userroles']:
info['userroles'][ucrole.role] = [ ]
info['userroles'][ucrole.role].append(ucrole.user.username)
status = scraper.get_vcs_status(rev)
if 'code' not in quietfields:
info['code'] = status["code"]
for committag in ["currcommit", "prevcommit", "nextcommit"]:
if committag not in quietfields:
if committag in status:
info[committag] = convert_history(status[committag])
if "currcommit" not in status and "prevcommit" in status and not status["ismodified"]:
if 'filemodifieddate' in status:
info["modifiedcommitdifference"] = str(status["filemodifieddate"] - status["prevcommit"]["date"])
info['filemodifieddate'] = status['filemodifieddate'].isoformat()
if 'history' not in quietfields:
history = [ ]
commitentries = scraper.get_commit_log("code")
for commitentry in commitentries:
if history_start_date and commitentry['date'] < history_start_date:
continue
history.append(convert_history(commitentry))
history.reverse()
info['history'] = history
if scraper.wiki_type == 'scraper' and 'runevents' not in quietfields:
if history_start_date:
runevents = scraper.scraper.scraperrunevent_set.filter(run_ended__gte=history_start_date).order_by('-run_started')
else:
runevents = scraper.scraper.scraperrunevent_set.all().order_by('-run_started')[:2]
info['runevents'] = [ ]
for runevent in runevents:
info['runevents'].append(convert_run_event(runevent))
return info
| agpl-3.0 |
pshuff/sparts | tests/tasks/test_poller.py | 5 | 1315 | # Copyright (c) 2014, Facebook, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
from sparts.tasks.poller import PollerTask
from sparts.tests.base import SingleTaskTestCase
class MyTask(PollerTask):
INTERVAL = 0.1
counter = 0
do_increment = False
num_changes = 0
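    # PollerTask is expected to call fetch() every INTERVAL seconds and to
    # invoke onValueChanged(old, new) whenever the fetched value differs
    # (behaviour inferred from the assertions in PollerTests below)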
def fetch(self):
if self.do_increment:
self.counter += 1
return self.counter
def onValueChanged(self, old_value, new_value):
self.num_changes += 1
class PollerTests(SingleTaskTestCase):
TASK = MyTask
def test_value_changed(self):
self.assertEqual(self.task.getValue(), 0)
self.assertEqual(self.task.num_changes, 1) # Change from None => 1
self.task.execute(None)
self.assertEqual(self.task.getValue(), 0)
self.assertEqual(self.task.num_changes, 1)
# Enable incrementing, and force at least one execution
self.task.do_increment = True
self.task.execute(None)
self.task.do_increment = False
self.assertGreater(self.task.getValue(), 0)
self.assertGreater(self.task.num_changes, 1)
| bsd-3-clause |
npardington/fabric-bolt | fabric_bolt/task_runners/channels/consumers.py | 1 | 3109 | import json
import subprocess
from importlib import import_module
import ansiconv
import sys
from channels import Group
from channels.auth import channel_session_user_from_http
from channels.sessions import channel_session
from django.conf import settings
from fabric_bolt.projects.models import Project, Deployment
from fabric_bolt.projects.signals import deployment_finished
from .. import backend
import time
def start_task(message):
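    # give the websocket consumer a moment to join the deployment group
    # before output starts streaming (a guess at why the sleep is here)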
time.sleep(1)
project = Project.objects.get(id=message.content['project_id'])
deployment = Deployment.objects.get(id=message.content['deployment_id'])
deployment.output = ''
deployment.save()
engine = import_module(settings.SESSION_ENGINE)
SessionStore = engine.SessionStore
session = SessionStore(message.content['session_key'])
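    # rebuild the Django session from the key passed over the channel;
    # consumers don't receive request.session, and build_command below
    # presumably reads per-user state from it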
if backend.get_task_details(project, deployment.task.name) is None:
return
process = subprocess.Popen(
backend.build_command(project, deployment, session),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable=getattr(settings, 'SHELL', '/bin/sh'),
)
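    # stream the subprocess's combined stdout/stderr line by line, pushing
    # each line to the deployment's channel group as ANSI-converted HTML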
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() is not None:
break
Group("deployment-{}".format(deployment.id)).send({
"text": json.dumps({
'status': 'pending',
'text': str('<span class="output-line">{}</span>'.format(ansiconv.to_html(nextline)))
}),
})
deployment.add_output(nextline)
sys.stdout.flush()
Deployment.objects.filter(pk=deployment.id).update(
status=deployment.SUCCESS if process.returncode == 0 else deployment.FAILED
)
Group("deployment-{}".format(deployment.id)).send({
"text": json.dumps({
'status': deployment.SUCCESS if process.returncode == 0 else deployment.FAILED,
'text': ''
}),
})
deployment_finished.send(deployment, deployment_id=deployment.pk)
# Connected to websocket.connect
@channel_session_user_from_http
def ws_connect(message):
# Work out room name from path (ignore slashes)
deployment_id = message.content['path'].strip("/")
# Save room in session and add us to the group
message.channel_session['deployment_id'] = deployment_id
Group("deployment-{}".format(deployment_id)).add(message.reply_channel)
deployment = Deployment.objects.filter(pk=deployment_id)[0]
Group("deployment-{}".format(deployment_id)).send({
"text": json.dumps({
"text": deployment.get_formatted_output(),
'status': deployment.status
})
})
# Connected to websocket.disconnect
@channel_session
def ws_disconnect(message):
Group("deployment-{}".format(message.channel_session['deployment_id'])).discard(message.reply_channel)
# Connected to websocket.connect
@channel_session_user_from_http
def ws_receive(message):
deployment = Deployment.objects.filter(pk=message.channel_session['deployment_id'])[0]
deployment.add_input(message.content)
| mit |
marissazhou/django | tests/migrations/test_writer.py | 100 | 24300 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import functools
import math
import os
import re
import tokenize
import unittest
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django import get_version
from django.conf import settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.writer import (
MigrationWriter, OperationWriter, SettingsReference,
)
from django.test import SimpleTestCase, ignore_warnings, mock
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.deconstruct import deconstructible
from django.utils.timezone import FixedOffset, get_default_timezone, utc
from django.utils.translation import ugettext_lazy as _
from .models import FoodManager, FoodQuerySet
class TestModel1(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.TestOperation(\n'
'),'
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4)
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' ),\n'
' arg2=custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=3,\n'
' kwarg2=4,\n'
' ),\n'
'),'
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation("test\n arg1", "test\narg2")
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),"
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
' kwarg2=2,\n'
' ),\n'
' ],\n'
'),'
)
class WriterTests(SimpleTestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
def safe_exec(self, string, value=None):
l = {}
try:
exec(string, globals(), l)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
return l
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_functions(self):
with six.assertRaisesRegex(self, ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.utcnow())
self.assertSerializedEqual(datetime.datetime.utcnow)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=FixedOffset(180)))
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
{'import datetime', 'from django.utils.timezone import utc'},
)
)
def test_serialize_datetime_safe(self):
self.assertSerializedResultEqual(
datetime_safe.date(2014, 3, 31),
("datetime.date(2014, 3, 31)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.time(10, 25),
("datetime.time(10, 25)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.datetime(2014, 3, 31, 16, 4, 31),
("datetime.datetime(2014, 3, 31, 16, 4, 31)", {'import datetime'})
)
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"})
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
("models.TextField(blank=True, null=True)", {'from django.db import models'})
)
def test_serialize_settings(self):
self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
)
self.assertSerializedResultEqual(
((x, x * x) for x in range(3)),
("((0, 0), (1, 1), (2, 4))", set())
)
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r'^\w+$', re.U)
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r'^\w+$', re.U))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$', 32))")
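        # 32 is the numeric value of re.U: the serializer writes compiled
        # regex flags by value rather than by name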
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r'^[0-9]+$', flags=re.U)
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=32)")
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
with six.assertRaisesRegex(self, ImportError, "No module named '?custom'?"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
def test_serialize_builtins(self):
string, imports = MigrationWriter.serialize(range)
self.assertEqual(string, 'range')
self.assertEqual(imports, set())
@unittest.skipUnless(six.PY2, "Only applies on Python 2")
def test_serialize_direct_function_reference(self):
"""
Ticket #22436: You cannot use a function straight from its body
(e.g. define the method and use it in the same body)
"""
with self.assertRaises(ValueError):
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""
Neither py2 or py3 can serialize a reference in a local scope.
"""
class TestModel2(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaises(ValueError):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_local_function_reference_message(self):
"""
Make sure user is seeing which module/function is the issue
"""
class TestModel2(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with six.assertRaisesRegex(self, ValueError,
'^Could not find function upload_to in migrations.test_writer'):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
)
self.assertSerializedEqual(FoodManager('a', 'b'))
self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_serialize_functools_partial(self):
value = functools.partial(datetime.timedelta, 1, seconds=2)
result = self.serialize_round_trip(value)
self.assertEqual(result.func, value.func)
self.assertEqual(result.args, value.args)
self.assertEqual(result.keywords, value.keywords)
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
}
options = {
'verbose_name': 'My model',
'verbose_name_plural': 'My models',
}
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
migrations.CreateModel(
name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)
),
migrations.DeleteModel("MyModel"),
migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
],
"dependencies": [("testapp", "some_other_one")],
})
writer = MigrationWriter(migration)
output = writer.as_string()
# It should NOT be unicode.
self.assertIsInstance(output, six.binary_type, "Migration as_string returned unicode")
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
# In order to preserve compatibility with Python 3.2 unicode literals
# prefix shouldn't be added to strings.
tokens = tokenize.generate_tokens(six.StringIO(str(output)).readline)
for token_type, token_source, (srow, scol), __, line in tokens:
if token_type == tokenize.STRING:
self.assertFalse(
token_source.startswith('u'),
"Unicode literal prefix found at %d:%d: %r" % (
srow, scol, line.strip()
)
)
# Silence warning on Python 2: Not importing directory
# 'tests/migrations/migrations_test_apps/without_init_file/migrations':
# missing __init__.py
@ignore_warnings(category=ImportWarning)
def test_migration_path(self):
test_apps = [
'migrations.migrations_test_apps.normal',
'migrations.migrations_test_apps.with_package_model',
'migrations.migrations_test_apps.without_init_file',
]
base_dir = os.path.dirname(os.path.dirname(upath(__file__)))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={'append': app}):
migration = migrations.Migration('0001_initial', app.split('.')[-1])
expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation()
],
"dependencies": []
})
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result['custom_migration_operations'].operations.TestOperation,
result['custom_migration_operations'].more_operations.TestOperation
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
migrations.AddField("mymodel", "myfield", models.DateTimeField(
default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
)),
]
})
writer = MigrationWriter(migration)
output = writer.as_string().decode('utf-8')
self.assertIn(
"import datetime\n"
"from django.db import migrations, models\n"
"from django.utils.timezone import utc\n",
output
)
def test_migration_file_header_comments(self):
"""
Test comments at top of file.
"""
migration = type(str("Migration"), (migrations.Migration,), {
"operations": []
})
dt = datetime.datetime(2015, 7, 31, 4, 40, 0, 0, tzinfo=utc)
with mock.patch('django.db.migrations.writer.now', lambda: dt):
writer = MigrationWriter(migration)
output = writer.as_string().decode('utf-8')
self.assertTrue(
output.startswith(
"# -*- coding: utf-8 -*-\n"
"# Generated by Django %(version)s on 2015-07-31 04:40\n" % {
'version': get_version(),
}
)
)
def test_models_import_omitted(self):
"""
django.db.models shouldn't be imported if unused.
"""
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
migrations.AlterModelOptions(
name='model',
options={'verbose_name': 'model', 'verbose_name_plural': 'models'},
),
]
})
writer = MigrationWriter(migration)
output = writer.as_string().decode('utf-8')
self.assertIn("from django.db import migrations\n", output)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
class DeconstructableInstances(object):
def deconstruct(self):
return ('DeconstructableInstances', [], {})
string = MigrationWriter.serialize(models.CharField(default=DeconstructableInstances))[0]
self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructableInstances)")
| bsd-3-clause |
gobstones/PyGobstones | pygobstones/gui/mainWindow.py | 1 | 26321 | # -*- coding: utf-8 -*-
from views.gobstonesMain import *
from PyQt4 import QtGui
from PyQt4 import QtCore
import datetime
from views.qDesigner.interactive import *
sys.path.append('..')
from .fileOption import FileOption
from .preferencesWindow import PreferencesWindow
from .editOption import EditOption
from .boardOption import *
from .helpOption import HelpOption
from pygobstones.language.programRun import *
from views.boardPrint.board import *
from views.boardPrint.boardViewer import *
from resultsMainWindow import *
from pygobstones.commons.i18n import *
from pygobstones.commons.paths import root_path
from views.boardPrint.parseBoard import *
import time
import views.resources
import logging
GOBSTONES = 'Gobstones 3.0.0'
XGOBSTONES = 'XGobstones 1.0.0'
class MainWindow(QtGui.QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.logger = logging.getLogger()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.initOptions()
self.initMenuBarActions()
self.initSignalsAndSlots()
self.ui.actionStop.setEnabled(False)
self.clothing = 'Gobstones.xml'
self.lang = GOBSTONES
self.initWindowTitle()
self.initPreferencesDictionary()
self.initLoggerSize()
self.initialBoardGenerator = InitialBoardGenerator()
self.guiInterpreterHandler = GUIInterpreterHandler(self)
self.programRun = ProgramRun(self.getLang(),
self.guiInterpreterHandler)
self.rootDirectory = root_path()
self.runButton = RunButton(self, self.ui.actionRun,
self.ui.actionStop)
self.setStyleSheet( "QMainWindow{background-image:url(':/backgroundWidget.png')}")
def initWindowTitle(self):
self.filePath = i18n('Without working directory')
self.updateWindowTitle()
def initMenuBarActions(self):
self.ui.actionNewFile.triggered.connect(self.openNewFileDialog)
self.ui.actionCloseFile.triggered.connect(self.closeFiles)
self.ui.actionOpenFile.triggered.connect(self.openFileDialog)
self.ui.actionSaveAs.triggered.connect(self.saveAsFileDialog)
self.ui.actionSave.triggered.connect(self.saveFile)
self.ui.actionExit.triggered.connect(self.close)
self.ui.actionRun.triggered.connect(self.run)
self.ui.actionStop.triggered.connect(self.stop)
self.ui.actionBoardOptions.triggered.connect(self.openBoardOptions)
self.ui.actionLoadBoard.triggered.connect(self.loadBoard)
self.ui.actionChangeLang.triggered.connect(self.changeLang)
self.ui.actionFind.triggered.connect(self.search)
self.ui.actionReplace.triggered.connect(self.replace)
self.ui.actionFonts.triggered.connect(self.fonts)
self.ui.actionPreferences.triggered.connect(self.openPreferences)
self.ui.actionOpenBoardEditor.triggered.connect(self.openBoardEditor)
self.ui.actionUndo.triggered.connect(self.undo)
self.ui.actionRedo.triggered.connect(self.redo)
self.ui.actionCut.triggered.connect(self.cut)
self.ui.actionCopy.triggered.connect(self.copy)
self.ui.actionPaste.triggered.connect(self.paste)
self.ui.actionSelectAll.triggered.connect(self.selectAll)
self.ui.actionManual.triggered.connect(self.openManual)
self.ui.actionLicense.triggered.connect(self.viewLicense)
self.ui.actionAbout.triggered.connect(self.viewAbout)
self.ui.actionCheck.triggered.connect(self.check)
def initPreferencesDictionary(self):
global preferencesDictionary
preferencesDictionary = {'logger': False,
'roseOfWinds': True,
'cellNumbers': True,
'lineNumbers': True,
'autoIndentation': False,
}
def initLoggerSize(self):
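        # the splitter holds [editor, logger]; a logger height of 0
        # collapses the log pane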
if MainWindow.getPreference('logger'):
self.ui.splitter.setSizes([800, 80])
else:
self.ui.splitter.setSizes([800, 0])
@staticmethod
def getPreference(keyPreference):
return preferencesDictionary[keyPreference]
def setPreference(self, keyPreference, valuePreference):
preferencesDictionary[keyPreference] = valuePreference
def getInitialBoard(self):
return self.initialBoardGenerator.getInitialBoard()
def setInitialBoard(self, board):
self.initialBoardGenerator.setInitialBoard(board)
def setAtNothingBoardOptions(self):
self.initialBoardGenerator.set_nothing_options()
def openBoardOptions(self):
self.boardOption.openBoardOptionWindow(self.initialBoardGenerator)
def openBoardEditor(self):
self.boardOption.openBoardEditor(self.initialBoardGenerator)
def undo(self):
self.editOption.undo()
def redo(self):
self.editOption.redo()
def cut(self):
self.editOption.cut()
def copy(self):
self.editOption.copy()
def paste(self):
self.editOption.paste()
def selectAll(self):
self.editOption.selectAll()
def search(self):
self.editOption.openSearch()
def replace(self):
self.editOption.openReplace()
def fonts(self):
self.editOption.openFontDialog()
def openPreferences(self):
PreferencesWindow(self)
def openManual(self):
self.helpOption.openManual()
def viewLicense(self):
self.helpOption.viewLicense()
def viewAbout(self):
self.helpOption.viewAbout()
def setClothing(self, clothing):
self.clothing = clothing
def getClothing(self):
return self.clothing
def initSignalsAndSlots(self):
self.ui.textEditFile.document().modificationChanged.connect(self.updateTextEditFileUI)
self.ui.textEditLibrary.document().modificationChanged.connect(self.updateTextEditLibraryUI)
def closeFiles(self):
self.fileOption.closeFiles()
def openFileDialog(self):
'''
        Purpose: open a dialog to choose a file, then load the selected
        file into the first text editor, and also load the library.
'''
if self.fileOption.openFiles():
self.fileOpened()
def openNewFileDialog(self):
self.fileOption.newFile()
self.fileOpened()
def closeEvent(self, event):
self.fileOption.closeApp(event)
def loadBoard(self):
self.boardOption.loadBoard()
def saveFile(self):
if self.fileOption.saveFile():
self.fileSaved()
def saveAsFileDialog(self):
if self.fileOption.saveAsFileDialog():
self.fileSaved()
def fileSaved(self):
self.updateCompleters()
def fileOpened(self):
self.updateCompleters()
def programText(self):
return str(self.ui.textEditFile.toPlainText())
def updateCompleters(self):
filename, text = str(self.fileOption.getFileName()), self.programText()
self.ui.textEditFile.updateCompleter(filename, text)
self.ui.textEditLibrary.setCompleter(self.ui.textEditFile.getCompleter())
def initOptions(self):
self.fileOption = FileOption(self)
self.editOption = EditOption(self)
self.editOption.initEditorBehavior()
self.boardOption = BoardOption(self)
self.helpOption = HelpOption(self)
def updateTextEditFileUI(self):
self.editOption.updateEditUI(self.ui.textEditFile, 0)
def updateTextEditLibraryUI(self):
self.editOption.updateEditUI(self.ui.textEditLibrary, 1)
def run(self):
self.ui.logger.clear()
if MainWindow.getPreference('logger') == False:
self.setPreference('logger', True)
self.initLoggerSize()
self.guiInterpreterHandler.wasStoped = False
        self.guiInterpreterHandler.showInLog(i18n(
            'Start execution || Language: ') + self.lang)
self.guiInterpreterHandler.log('----------------' +
str(datetime.datetime.now())[:19] +
'-----------------')
self.ui.logger.show()
self.ui.actionStop.setEnabled(True)
self.ui.actionCheck.setEnabled(False)
self.ui.statusbar.showMessage(QtCore.QString(i18n('Processing...')))
self.programRun.handler = self.guiInterpreterHandler
self.runButton.start(self.programRun)
def stop(self):
self.guiInterpreterHandler.initialStatus()
self.runButton.stopInterpreter()
self.resetButtonsRunAndStop()
self.ui.statusbar.showMessage(QtCore.QString
(i18n('Execution interrupted by the user')))
self.guiInterpreterHandler.showInLog(i18n(
'Execution interrupted by the user'))
self.guiInterpreterHandler.log('----------------' +
str(datetime.datetime.now())[:19] +
'-----------------')
def resetButtonsRunAndStop(self):
self.ui.actionStop.setEnabled(False)
self.ui.actionRun.setEnabled(True)
self.ui.actionCheck.setEnabled(True)
def updateFilePath(self, path):
self.filePath = path
self.updateWindowTitle()
def updateWindowTitle(self):
self.setWindowTitle(self.lang + ' -- ' + self.filePath)
def check(self):
self.ui.actionStop.setEnabled(True)
self.ui.actionCheck.setEnabled(False)
self.ui.actionRun.setEnabled(False)
        self.guiInterpreterHandler.showInLog(i18n(
            'Start check || Language: ') + self.lang)
self.guiInterpreterHandler.log('----------------' +
str(datetime.datetime.now())[:19] +
'-----------------')
self.ui.statusbar.showMessage(QtCore.QString(i18n('Checking...')))
self.checkButton = CheckButton(self)
self.checkButton.start()
def changeLang(self):
if self.lang == GOBSTONES:
self.lang = XGOBSTONES
self.ui.actionChangeLang.setText("XGobstones")
icon = QtGui.QIcon(":/logoXGobstones.png")
self.ui.actionChangeLang.setIcon(icon)
self.ui.set_highlighter(XGobstonesHighlighter)
else:
self.lang = GOBSTONES
self.ui.actionChangeLang.setText("Gobstones")
icon = QtGui.QIcon(":/logoGobstones.png")
self.ui.actionChangeLang.setIcon(icon)
self.ui.set_highlighter(GobstonesHighlighter)
        self.guiInterpreterHandler.showInLog(i18n
            ("The language was changed to ") + self.lang)
self.updateWindowTitle()
self.programRun = ProgramRun(self.getLang(),
self.guiInterpreterHandler)
def getLang(self):
if self.lang == GOBSTONES:
return 'gobstones'
else:
return 'xgobstones'
# RUN BUTTON .....................
class GUIInterpreterHandler(EjecutionFailureHandler, EjecutionHandler):
def __init__(self, mainW):
self.mainW = mainW
self.wasStoped = False
self.isOpenInteractiveW = False
self.interactiveW = InteractiveWindow(self.mainW)
self.interactiveRunning = False
self.failure_dict = {
EjecutionFailureHandler.DEFAULT: self.interpreter_log_default_exception,
EjecutionFailureHandler.PARSER_FAILURE: self.interpreter_log_failure,
EjecutionFailureHandler.STATIC_FAILURE: self.interpreter_log_failure,
EjecutionFailureHandler.DYNAMIC_FAILURE: self.interpreter_boom_failure,
}
super(GUIInterpreterHandler, self).__init__(self.failure_dict)
def initialStatus(self):
self.wasStoped = False
self.isOpenInteractiveW = False
self.interactiveW = InteractiveWindow(self.mainW)
self.interactiveRunning = False
def success(self, board_string, result):
if not self.interactiveRunning:
if not self.wasStoped:
self.mainW.ui.statusbar.showMessage(QtCore.QString
(i18n('Execution completed')))
self.results = Results(self.mainW)
board = self.prepareString(board_string)
self.results.setInitialBoard(BoardViewer(self,
self.mainW.initialBoardGenerator.board, self.mainW.getClothing()))
self.results.setFinalBoard(BoardViewer(self,
parseABoardString(board), self.mainW.getClothing()))
self.results.setRetVars(result)
self.setCodeInResults()
self.results.ui.tabWidgetResults.setCurrentIndex(2)
self.results.show()
self.mainW.resetButtonsRunAndStop()
self.showInLog(i18n('Execution completed'))
self.log('----------------'+
str(datetime.datetime.now())[:19] +
'-----------------\n')
else:
self.mainW.ui.statusbar.showMessage(QtCore.QString
(i18n('Execution completed')))
self.showInLog(i18n('Execution completed'))
self.log('----------------'+
str(datetime.datetime.now())[:19] +
'-----------------\n')
self.interactiveW.setStatusMessage(' ' + i18n('Execution completed'))
self.mainW.resetButtonsRunAndStop()
self.wasStoped = False
self.isOpenInteractiveW = False
self.interactiveRunning = False
def read_request(self):
self.interactiveRunning = True
if (not self.isOpenInteractiveW):
self.isOpenInteractiveW = True
self.partialBoard = self.mainW.initialBoardGenerator.getStringBoard()
self.interactiveW.initialStatus(self.partialBoard)
self.interactiveW.show()
def partial(self, board_str):
self.interactiveW.setPressAKeyState()
self.interactiveW.setBoard(board_str)
self.interactiveRunning = False
def log(self, msg):
if not self.wasStoped:
self.showInLog(msg)
def showInLog(self, msg):
        # unlike log(), this does not check whether execution was stopped
loggermsg = self.mainW.ui.logger.document().toPlainText()
self.mainW.ui.logger.setText(loggermsg + '\n -> ' + QtCore.QString().fromUtf8(msg))
self.mainW.ui.logger.moveCursor(QtGui.QTextCursor.End)
def prepareString(self, board):
myPrettyBoard = ''
for s in board:
            # drop carriage returns; line feeds are re-emitted as '\n'
            if not (ord(s) == 13 or ord(s) == 10):
                myPrettyBoard += s
            if ord(s) == 10:
                myPrettyBoard += '\n'
return myPrettyBoard
def interpreter_log_default_exception(self, exception):
if not self.wasStoped:
self.mainW.ui.statusbar.showMessage(QtCore.QString
(i18n('Was occurred an error')))
self.showInLog(i18n('Was occurred an error'))
self.log(exception.msg)
self.mainW.resetButtonsRunAndStop()
def interpreter_log_failure(self, exception):
if not self.wasStoped:
self.mainW.ui.statusbar.showMessage(QtCore.QString
(i18n('Was occurred an error')))
self.showInLog(i18n('Was occurred an error'))
self.showRowAndColError(exception)
self.log(exception.msg)
self.mainW.resetButtonsRunAndStop()
def showRowAndColError(self, exception):
self.showInLog(i18n('In row: ') +
str(exception.area.interval()[0].row) + ' // ' +
i18n('column: ') + str(exception.area.interval()[0].col))
def interpreter_boom_failure(self, exception):
if not self.wasStoped:
self.mainW.ui.statusbar.showMessage(QtCore.QString('Boom !!!'))
self.showInLog('Boom !!!')
self.log(exception.msg)
self.log('----------------'+
str(datetime.datetime.now())[:19] +
'-----------------\n')
if not self.interactiveRunning:
self.results = Results(self.mainW)
self.results.setInitialBoard(BoardViewer(self,
self.mainW.initialBoardGenerator.board, self.mainW.getClothing()))
self.results.setFinalBoard(BoardViewerError())
self.results.setRetVars(None)
self.results.ui.tabWidgetResults.setCurrentIndex(2)
self.setCodeInResults()
self.results.show()
else:
self.interactiveW.boom()
self.mainW.resetButtonsRunAndStop()
self.wasStoped = False
self.isOpenInteractiveW = False
self.interactiveRunning = False
def setCodeInResults(self):
fileCode = QtGui.QTextDocument(
i18n('### FILE CODE ###\n\n') + self.mainW.ui.textEditFile.document().toPlainText())
libraryCode = QtGui.QTextDocument(
i18n('### LIBRARY CODE ###\n\n') + self.mainW.ui.textEditLibrary.document().toPlainText())
self.results.setSourceCode(fileCode, libraryCode)
class RunButton(QtGui.QWidget):
def __init__(self, mainW, actionRun, actionStop):
super(RunButton, self).__init__()
self.mainW = mainW
self.actionRun = actionRun
self.actionStop = actionStop
def start(self, interpreter):
self.actionRun.setEnabled(False)
interpreter.run(str(self.mainW.fileOption.getFileName()),
self.mainW.programText(),
self.mainW.getInitialBoard())
def stopInterpreter(self):
self.mainW.guiInterpreterHandler.wasStoped = True
# CHECK BUTTON .....................
class CheckButton(QtGui.QWidget):
def __init__(self, mainW):
super(CheckButton, self).__init__()
self.mainW = mainW
def start(self):
self.gui = GUIInterpreterHandler_CheckMode(self.mainW)
self.mainW.programRun.handler = self.gui
self.mainW.programRun.run(str(self.mainW.fileOption.getFileName()),
self.mainW.programText(),
self.mainW.initialBoardGenerator.getStringBoard(),
ProgramRun.RunMode.ONLY_CHECK)
class GUIInterpreterHandler_CheckMode(GUIInterpreterHandler):
def success(self, board_string, result):
self.mainW.ui.statusbar.showMessage(QtCore.QString(i18n('Check completed')))
self.showInLog(i18n('Check completed, program is OK'))
self.log('----------------' +
str(datetime.datetime.now())[:19] +
'-----------------\n')
self.mainW.resetButtonsRunAndStop()
def initialize_failure_handler(self):
def fail_handler(exception):
self.mainW.ui.statusbar.showMessage(QtCore.QString(i18n('Check failed')))
self.showInLog(i18n('Check failed:'))
self.showRowAndColError(exception)
self.log(exception.msg)
self.log('----------------' +
str(datetime.datetime.now())[:19] +
'-----------------\n')
self.failure = EjecutionFailureHandler(fail_handler).failure
class InteractiveWindow(QtGui.QDialog):
def __init__(self, mainW):
super(InteractiveWindow, self).__init__()
self.setWindowTitle(i18n('Interactive Mode'))
self.setGeometry(200, 200, 600, 600)
self.ui = Ui_Interactive()
self.ui.setupUi(self)
self.ui.combo.activated[str].connect(self.onActivated)
self.mainW = mainW
self.current_clothing = 'Gobstones.xml'
self.pressAKey = True
self.setModal(True)
self.ui.pushButton.clicked.connect(self.switch_view)
self.currentImage = ':/ballGreen.png'
self.setStyleSheet( "InteractiveWindow{background-image:url(':/backgroundWidget.png');}")
self.load_views = None
self.forceQuit = False
def init_switcher(self):
if len(self.filesNames) == 1:
self.next_clothing = None
self.switcher = Switcher(i18n('Without clothing'), i18n('Without clothing'), self.ui.pushButton)
else:
self.current_clothing = self.boardV.getClothing()
if self.current_clothing == 'Gobstones.xml':
self.next_clothing = self.filesNames[1]
self.switcher = Switcher(i18n('Enable clothing'), i18n('Disable clothing'), self.ui.pushButton)
else:
self.next_clothing = 'Gobstones.xml'
self.switcher = Switcher(i18n('Disable clothing'), i18n('Enable clothing'), self.ui.pushButton)
def onActivated(self, text):
if not text == 'Gobstones':
if clothing_for_file_exists(self.mainW.fileOption.moduleFile):
fn = str(text) + ".xml"
path = os.path.join(clothing_dir_for_file(self.mainW.fileOption.moduleFile), fn)
self.next_clothing = self.current_clothing
self.current_clothing = path
self.boardV.setClothing(path)
self.boardV.update()
elif self.current_clothing != 'Gobstones.xml':
self.switch_view()
self.switcher.change_state(text)
def loadViewAlternatives(self):
self.filesNames = ['Gobstones', 'PixelBoard']
if clothing_for_file_exists(self.mainW.fileOption.moduleFile):
path = clothing_dir_for_file(self.mainW.fileOption.moduleFile)
files = os.listdir(path)
for f in files:
fileName, fileExtension = os.path.splitext(os.path.join(path,f))
if fileExtension == '.xml':
self.filesNames.append(os.path.join(path, fileName))
for fn in self.filesNames:
(filepath, filename) = os.path.split(fn)
self.ui.combo.addItem(filename)
def switch_view(self):
self.switcher.switch()
self.ui.pushButton.setText(self.switcher.get_text())
if self.current_clothing != "Gobstones.xml":
self.boardV.setClothing('Gobstones.xml')
self.boardV.update()
self.next_clothing = self.current_clothing
self.current_clothing = 'Gobstones.xml'
else:
if self.next_clothing is not None:
self.boardV.setClothing(self.add_extension(self.next_clothing))
self.boardV.update()
self.current_clothing = self.next_clothing
self.next_clothing = 'Gobstones.xml'
else:
return
def setPressAKeyState(self):
self.pressAKey = True
self.ui.label.setText( ' ' + i18n("Press a key to continue"))
self.currentImage = ':/ballGreen.png'
self.update()
def setProcessingAKeyState(self):
self.pressAKey = False
self.ui.label.setText( ' ' + i18n("Processing a key, wait"))
self.currentImage = ':/ballRed.png'
self.update()
def keyPressEvent(self, e):
modifiers = QtGui.QApplication.keyboardModifiers()
if e.key() == QtCore.Qt.Key_D and modifiers.testFlag(QtCore.Qt.ControlModifier):
a = str(e.text())
ordinalValue = ord(a)
self.setProcessingAKeyState()
self.mainW.programRun.send_input(ordinalValue)
if self.forceQuit:
super(InteractiveWindow, self).keyPressEvent(e)
self.close()
self.forceQuit = True
elif self.pressAKey:
if e.key() != QtCore.Qt.Key_Control:
self.forceQuit = False
try:
a = str(e.text())
ordinalValue = ord(a)
except:
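                # non-printable keys (e.g. arrows) have no text(), so ord()
                # fails; map them to the sentinel codes 1001-1004 that the
                # interactive run protocol expects (as used below)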
if e.key() == QtCore.Qt.Key_Left:
self.setProcessingAKeyState()
self.mainW.programRun.send_input(1004)
elif e.key() == QtCore.Qt.Key_Up:
self.setProcessingAKeyState()
self.mainW.programRun.send_input(1001)
elif e.key() == QtCore.Qt.Key_Right:
self.setProcessingAKeyState()
self.mainW.programRun.send_input(1003)
elif e.key() == QtCore.Qt.Key_Down:
self.setProcessingAKeyState()
self.mainW.programRun.send_input(1002)
return
self.setProcessingAKeyState()
self.mainW.programRun.send_input(ordinalValue)
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
painter.setRenderHint(QtGui.QPainter.Antialiasing)
rect = QtCore.QRect(self.width() - 285, self.height() - 32, 20, 20)
img = QtGui.QImage(self.currentImage)
painter.drawImage(rect, img)
painter.end()
def setBoard(self, board):
self.boardV = BoardViewer(self, parseABoardString(board), self.add_extension(self.current_clothing))
self.boardV.setParent(self.ui.boardViewer)
self.ui.boardViewer.removeTab(0)
self.ui.boardViewer.insertTab(0, self.boardV, i18n('Board'))
def add_extension(self, path):
if not path.endswith('xml'):
return path + '.xml'
else:
return path
def boom(self):
self.setStatusMessage(' BOOM !!!')
boom = BoardViewerError()
self.ui.boardViewer.removeTab(0)
self.ui.boardViewer.insertTab(0, boom, i18n('Interactive'))
def setStatusMessage(self, message):
self.pressAKey = False
self.ui.label.setText(i18n(message))
def reset_clothing(self):
self.ui.combo.clear()
def initialStatus(self, partialBoard):
if (self.load_views is None) or (self.load_views != root_path()):
self.reset_clothing()
self.loadViewAlternatives()
self.load_views = root_path()
self.boardV = BoardViewer(self, parseABoardString(partialBoard), self.mainW.getClothing())
self.boardV.setParent(self.ui.boardViewer)
self.ui.boardViewer.removeTab(0)
self.ui.boardViewer.insertTab(0, self.boardV, i18n('Board'))
self.setPressAKeyState()
self.init_switcher()
def closeEvent(self, e):
self.mainW.ui.actionStop.setEnabled(False)
self.mainW.programRun.send_input(4)
e.accept()
| gpl-3.0 |
krafczyk/spack | lib/spack/external/_pytest/fixtures.py | 21 | 45468 | from __future__ import absolute_import, division, print_function
import inspect
import sys
import warnings
import py
from py._code.code import FormattedExcinfo
import _pytest
from _pytest import nodes
from _pytest._code.code import TerminalRepr
from _pytest.compat import (
NOTSET, exc_clear, _format_args,
getfslineno, get_real_func,
is_generator, isclass, getimfunc,
getlocation, getfuncargnames,
safe_getattr,
FuncargnamesCompatAttr,
)
from _pytest.outcomes import fail, TEST_OUTCOME
from ordereddict_backport import OrderedDict
def pytest_sessionstart(session):
import _pytest.python
scopename2class.update({
'class': _pytest.python.Class,
'module': _pytest.python.Module,
'function': _pytest.main.Item,
})
session._fixturemanager = FixtureManager(session)
scopename2class = {}
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
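# Illustration (hypothetical values): a call registered with
# funcargs={'x': 1} ends up with callspec.params == {'x': 1} backed by an
# artificial FixtureDef for 'x', so setup-time code can treat direct
# funcargs exactly like any other fixture.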
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
# cs.indices.items() yields argnames in arbitrary order. Sort them
# so that different calls to get_parametrized_fixture_keys
# are deterministic.
for argname, param_index in sorted(cs.indices.items()):
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
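# A sketch of the effect (hypothetical test ids): for items
# [t1[s1], t2[s2], t3[s1]] parametrized on a session-scoped fixture 's',
# reorder_items groups them as [t1[s1], t3[s1], t2[s2]] so each
# session-scoped value is set up and torn down only once.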
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache, scopenum + 1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
# items list into a list of items_before, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
newargkeys = OrderedDict.fromkeys(k for k in argkeys if k not in ignore)
if newargkeys: # found a slicing key
slicing_argkey, _ = newargkeys.popitem()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._fixture_values = {} # argname -> fixture value
self._fixture_defs = {} # argname -> FixtureDef
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self):
# backward incompatible note: now a readonly property
return list(self._pyfuncitem._fixtureinfo.names_closure)
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
# we arrive here because of a dynamic call to
# getfixturevalue(argname) which was naturally
# not known at parsing/collection time
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
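# A usage sketch of this deprecated API (open_conn/close_conn are
# hypothetical helpers):
#   conn = request.cached_setup(setup=open_conn, teardown=close_conn,
#                               scope="session")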
def getfixturevalue(self, argname):
""" Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
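# A sketch of a dynamic lookup (the "--backend" option and *_storage
# fixtures are hypothetical):
#   @fixture
#   def storage(request):
#       backend = request.config.getoption("--backend", "mem")
#       return request.getfixturevalue(backend + "_storage")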
def getfuncargvalue(self, argname):
""" Deprecated, use getfixturevalue. """
from _pytest import deprecated
warnings.warn(
deprecated.GETFUNCARGVALUE,
DeprecationWarning,
stacklevel=2)
return self.getfixturevalue(argname)
def _get_active_fixturedef(self, argname):
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfixturevalue(fixturedef)
self._fixture_values[argname] = result
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
values = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
values.reverse()
return values
values.append(fixturedef)
current = current._parent_request
def _getfixturevalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
if fixturedef.params is not None:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = frameinfo.filename
source_lineno = frameinfo.lineno
source_path = py.path.local(source_path)
if source_path.relto(funcitem.config.rootdir):
source_path = source_path.relto(funcitem.config.rootdir)
msg = (
"The requested fixture has no parameter defined for the "
"current test.\n\nRequested fixture '{0}' defined in:\n{1}"
"\n\nRequested here:\n{2}:{3}".format(
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path,
source_lineno,
)
)
fail(msg)
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
# if its not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" % (
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" % (self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self._pyfuncitem = request._pyfuncitem
self._fixture_values = request._fixture_values
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
def addfinalizer(self, finalizer):
self._fixturedef.addfinalizer(finalizer)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
def scope2index(scope, descr, where=None):
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined.
"""
try:
return scopes.index(scope)
except ValueError:
raise ValueError(
"{0} {1}has an unsupported scope value '{2}'".format(
descr, 'from {0} '.format(where) if where else '',
scope)
)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
# the last fixture raised an error, let's present
# it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline("file %s, line %s" % (fspath, lineno + 1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist and name not in available:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" % (", ".join(sorted(available)),)
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
lines[0].strip()), red=True)
for line in lines[1:]:
tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
line.strip()), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno + 1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
it = fixturefunc(**kwargs)
res = next(it)
def teardown():
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
res = fixturefunc(**kwargs)
return res
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scope2index(
scope or "function",
descr='fixture {0}'.format(func.__name__),
where=baseid
)
self.params = params
startindex = 1 if unittest else None
self.argnames = getfuncargnames(func, startindex=startindex)
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
exceptions = []
try:
while self._finalizer:
try:
func = self._finalizer.pop()
func()
except: # noqa
exceptions.append(sys.exc_info())
if exceptions:
e = exceptions[0]
del exceptions # ensure we don't keep all frames alive because of the traceback
py.builtin._reraise(*e)
finally:
ihook = self._fixturemanager.session.ihook
ihook.pytest_fixture_post_finalizer(fixturedef=self)
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
ihook = self._fixturemanager.session.ihook
return ihook.pytest_fixture_setup(fixturedef=self, request=request)
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def pytest_fixture_setup(fixturedef, request):
""" Execution of fixture setup. """
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = fixturedef.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
my_cache_key = request.param_index
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
class FixtureFunctionMarker:
def __init__(self, scope, params, autouse=False, ids=None, name=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.ids = ids
self.name = name
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define a
fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module" or "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
Fixtures can optionally provide their values to test functions using a ``yield`` statement,
instead of ``return``. In this case, the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome. A fixture function must yield exactly once.
"""
if callable(scope) and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, name=name)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
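# A minimal usage sketch (test code, not part of this module):
#
#   @fixture(scope="module", params=[1, 2], ids=["one", "two"])
#   def number(request):
#       return request.param
#
#   def test_number(number):   # collected twice, once per param
#       assert number in (1, 2)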
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
if callable(scope) and params is None and not autouse:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, ids=ids, name=name)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
as the closure of the fixtures needed to set up the initial fixtures,
i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
argnames = getfuncargnames(func, cls=cls)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != nodes.SEP:
nodeid = nodeid.replace(p.sep, nodes.SEP)
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i + 1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
# collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
# factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
parametrize_func = getattr(metafunc.function, 'parametrize', None)
func_params = getattr(parametrize_func, 'args', [[None]])
func_kwargs = getattr(parametrize_func, 'kwargs', {})
# skip directly parametrized arguments
if "argnames" in func_kwargs:
argnames = parametrize_func.kwargs["argnames"]
else:
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
# The attribute can be an arbitrary descriptor, so the attribute
# access below can raise. safe_getattr() ignores such exceptions.
obj = safe_getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
from _pytest import deprecated
self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
if marker.name:
name = marker.name
msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
'and be decorated with @pytest.fixture:\n%s' % name
assert not name.startswith(self._argprefix), msg
fixture_def = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
"""
Gets a list of fixtures which are applicable to the given node id.
:param str argname: name of the fixture to search for
:param str nodeid: full node id of the requesting test.
:return: list[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodes.ischildnode(fixturedef.baseid, nodeid):
yield fixturedef
| lgpl-2.1 |
hn8841182/W11 | static/Brython3.1.0-20150301-090019/Lib/xml/sax/saxutils.py | 730 | 11688 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
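# A quick sketch of the expected behaviour:
#   escape("<Hello & 'world'>", {"'": "&apos;"})
#   -> "&lt;Hello &amp; &apos;world&apos;&gt;"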
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
entities.update({'\n': ' ', '\r': ' ', '\t':'	'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
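# Quick sketches of the expected output (shown as raw attribute text):
#   quoteattr('a "quoted" value')  returns  'a "quoted" value'   (single-quoted)
#   quoteattr("it's plain")        returns  "it's plain"         (double-quoted)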
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses these methods to determine
# if a BOM (for UTF-16, etc.) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
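# A minimal usage sketch (io.StringIO is passed through as a text sink):
#   import io
#   out = io.StringIO()
#   gen = XMLGenerator(out, encoding="utf-8", short_empty_elements=True)
#   gen.startDocument()
#   gen.startElement("root", {"id": "1"})
#   gen.endElement("root")
#   gen.endDocument()
#   out.getvalue()  # '<?xml version="1.0" encoding="utf-8"?>\n<root id="1"/>'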
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
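# Usage sketch: resolve a system id relative to a base document before parsing.
#   source = prepare_input_source("data.xml", base="/tmp/doc.xml")
#   stream = source.getByteStream()   # opened file or URL, ready for reading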
| gpl-3.0 |
teodorch85/teir | python/script.py | 1 | 1551 | import webiopi
import datetime
GPIO = webiopi.GPIO
#LIGHT = 17 # GPIO pin using BCM numbering
VALVE1 = 2
VALVE2 = 3
VALVE3 = 7
VALVE4 = 8
VALVE5 = 9
VALVE6 = 10
VALVE7 = 11
VALVE8 = 18
HOUR_ON = 8 # Turn Light ON at 08:00 (referenced by the macros below)
HOUR_OFF = 18 # Turn Light OFF at 18:00
# setup function is automatically called at WebIOPi startup
def setup():
# set the GPIO used by the light to output
GPIO.setFunction(VALVE1, GPIO.OUT)
GPIO.setFunction(VALVE2, GPIO.OUT)
GPIO.setFunction(VALVE3, GPIO.OUT)
GPIO.setFunction(VALVE4, GPIO.OUT)
GPIO.setFunction(VALVE5, GPIO.OUT)
GPIO.setFunction(VALVE6, GPIO.OUT)
GPIO.setFunction(VALVE7, GPIO.OUT)
GPIO.setFunction(VALVE8, GPIO.OUT)
# retrieve current datetime
#now = datetime.datetime.now()
# test if we are between ON time and tun the light ON
#if ((now.hour >= HOUR_ON) and (now.hour < HOUR_OFF)):
# GPIO.digitalWrite(LIGHT, GPIO.HIGH)
@webiopi.macro
def getLightHours():
return "%d;%d" % (HOUR_ON, HOUR_OFF)
@webiopi.macro
def setLightHours(on, off):
global HOUR_ON, HOUR_OFF
HOUR_ON = int(on)
HOUR_OFF = int(off)
return getLightHours()
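# A sketch of invoking these macros from the WebIOPi JavaScript client:
#   webiopi().callMacro("setLightHours", [8, 18]);
#   webiopi().callMacro("getLightHours");   // returns "8;18"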
# destroy function is called at WebIOPi shutdown
def destroy():
GPIO.digitalWrite(VALVE1, GPIO.HIGH)
GPIO.digitalWrite(VALVE2, GPIO.HIGH)
GPIO.digitalWrite(VALVE3, GPIO.HIGH)
GPIO.digitalWrite(VALVE4, GPIO.HIGH)
GPIO.digitalWrite(VALVE5, GPIO.HIGH)
GPIO.digitalWrite(VALVE6, GPIO.HIGH)
GPIO.digitalWrite(VALVE7, GPIO.HIGH)
GPIO.digitalWrite(VALVE8, GPIO.HIGH)
| mit |
OptiPop/external_chromium_org | tools/telemetry/telemetry/web_perf/metrics/rendering_frame.py | 45 | 3109 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
from telemetry.timeline import slice as slice_module
from telemetry.timeline import bounds
class MissingData(Exception):
pass
class NoBeginFrameIdException(Exception):
pass
class RenderingFrame(object):
"""Object with information about the triggering of a BeginMainFrame event."""
send_begin_frame_event = 'ThreadProxy::ScheduledActionSendBeginMainFrame'
begin_main_frame_event = 'ThreadProxy::BeginMainFrame'
def __init__(self, events):
all_send_begin_frame_events = [e for e in events
if e.name == self.send_begin_frame_event]
if len(all_send_begin_frame_events) != 1:
raise MissingData('There must be exactly one %s event.' %
self.send_begin_frame_event)
all_begin_main_frame_events = [e for e in events
if e.name == self.begin_main_frame_event]
if not all_begin_main_frame_events:
raise MissingData('There must be at least one %s event.' %
self.begin_main_frame_event)
all_begin_main_frame_events.sort(key=lambda e: e.start)
self._send_begin_frame = all_send_begin_frame_events[0]
self._begin_main_frame = all_begin_main_frame_events[-1]
self._bounds = bounds.Bounds()
self._bounds.AddEvent(self._begin_main_frame)
self._bounds.AddEvent(self._send_begin_frame)
@staticmethod
def IsEventUseful(event):
return event.name in [RenderingFrame.send_begin_frame_event,
RenderingFrame.begin_main_frame_event]
@property
def bounds(self):
return self._bounds
@property
def queueing_duration(self):
return self._begin_main_frame.start - self._send_begin_frame.start
def GetFrameEventsInsideRange(renderer_process, timeline_range):
"""Returns RenderingFrames for all relevant events in the timeline_range."""
# First filter all events from the renderer_process and turn them into a
# dictionary of the form:
# {0: [send_begin_frame, begin_main_frame, begin_main_frame],
# 1: [begin_main_frame, send_begin_frame],
# 2: [send_begin_frame, begin_main_frame]}
begin_frame_events_by_id = defaultdict(list)
for event in renderer_process.IterAllEvents(
event_type_predicate=lambda t: t == slice_module.Slice,
event_predicate=RenderingFrame.IsEventUseful):
begin_frame_id = event.args.get('begin_frame_id', None)
if begin_frame_id is None:
raise NoBeginFrameIdException('Event is missing a begin_frame_id.')
begin_frame_events_by_id[begin_frame_id].append(event)
# Now, create RenderingFrames for events wherever possible.
frames = []
for events in begin_frame_events_by_id.values():
try:
frame = RenderingFrame(events)
if frame.bounds.Intersects(timeline_range):
frames.append(frame)
except MissingData:
continue
frames.sort(key=lambda frame: frame.bounds.min)
return frames
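# Usage sketch (renderer_process and record_bounds come from a loaded
# timeline model and an interaction record, respectively):
#   frames = GetFrameEventsInsideRange(renderer_process, record_bounds)
#   queueing_durations = [f.queueing_duration for f in frames]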
| bsd-3-clause |
pichillilorenzo/JavaScript-Completions | helper/can_i_use/can_i_use_command.py | 2 | 10624 | import os
import re
import cgi
import json
import traceback
import webbrowser
import sublime
import sublime_plugin
from distutils.version import LooseVersion
# H_SETTINGS_FOLDER, Util and javascriptCompletions are expected to be injected
# by the enclosing plugin before this helper module is executed.
items_found_can_i_use = None
can_i_use_file = None
can_i_use_popup_is_showing = False
can_i_use_list_from_main_menu = False
path_to_can_i_use_data = os.path.join(H_SETTINGS_FOLDER, "can_i_use", "can_i_use_data.json")
path_to_test_can_i_use_data = os.path.join(H_SETTINGS_FOLDER, "can_i_use", "can_i_use_data2.json")
url_can_i_use_json_data = "https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json"
can_i_use_css = ""
with open(os.path.join(H_SETTINGS_FOLDER, "can_i_use", "style.css")) as css_file:
can_i_use_css = "<style>"+css_file.read()+"</style>"
def download_can_i_use_json_data() :
global can_i_use_file
if os.path.isfile(path_to_can_i_use_data) :
with open(path_to_can_i_use_data) as json_file:
try :
can_i_use_file = json.load(json_file)
except Exception as e :
print("Error: "+traceback.format_exc())
sublime.active_window().status_message("Can't use \"Can I use\" json data from: https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json")
if Util.download_and_save(url_can_i_use_json_data, path_to_test_can_i_use_data) :
if os.path.isfile(path_to_can_i_use_data) :
if not Util.checksum_sha1_equalcompare(path_to_can_i_use_data, path_to_test_can_i_use_data) :
with open(path_to_test_can_i_use_data) as json_file:
try :
can_i_use_file = json.load(json_file)
if os.path.isfile(path_to_can_i_use_data) :
os.remove(path_to_can_i_use_data)
os.rename(path_to_test_can_i_use_data, path_to_can_i_use_data)
except Exception as e :
print("Error: "+traceback.format_exc())
sublime.active_window().status_message("Can't use new \"Can I use\" json data from: https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json")
if os.path.isfile(path_to_test_can_i_use_data) :
os.remove(path_to_test_can_i_use_data)
else :
os.rename(path_to_test_can_i_use_data, path_to_can_i_use_data)
with open(path_to_can_i_use_data) as json_file :
try :
can_i_use_file = json.load(json_file)
except Exception as e :
print("Error: "+traceback.format_exc())
sublime.active_window().status_message("Can't use \"Can I use\" json data from: https://raw.githubusercontent.com/Fyrd/caniuse/master/data.json")
Util.create_and_start_thread(download_can_i_use_json_data, "DownloadCanIuseJsonData")
def find_in_can_i_use(word) :
global can_i_use_file
can_i_use_data = can_i_use_file.get("data")
word = word.lower()
return [value for key, value in can_i_use_data.items() if value["title"].lower().find(word) >= 0]
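# e.g., a sketch: find_in_can_i_use("flex") returns every feature entry
# whose title contains "flex" (case-insensitive).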
def back_to_can_i_use_list(action):
global can_i_use_popup_is_showing
if action.find("http") >= 0:
webbrowser.open(action)
return
view = sublime.active_window().active_view()
can_i_use_popup_is_showing = False
view.hide_popup()
if len(action.split(",")) > 1 and action.split(",")[1] == "main-menu" :
view.run_command("can_i_use", args={"from": "main-menu"})
else :
view.run_command("can_i_use")
def show_pop_can_i_use(index):
global can_i_use_file
global items_found_can_i_use
global can_i_use_popup_is_showing
if index < 0:
return
item = items_found_can_i_use[index]
browser_accepted = ["ie", "edge", "firefox", "chrome", "safari", "opera", "ios_saf", "op_mini", "android", "and_chr"]
browser_name = [
" IE",
" EDGE",
" Firefox",
" Chrome",
" Safari",
" Opera",
" iOS Safari",
" Opera Mini",
" Android Browser",
"Chrome for Android"
]
html_browser = ""
html_browser += "<div>"
html_browser += "<h1 class=\"title\">"+cgi.escape(item["title"])+" <a href=\""+item["spec"].replace(" ", "%20")+"\"><span class=\"status "+item["status"]+"\"> - "+item["status"].upper()+"</span></a></h1>"
html_browser += "<p class=\"description\">"+cgi.escape(item["description"])+"</p>"
html_browser += "<p class=\"\"><span class=\"support\">Global Support: <span class=\"support-y\">"+str(item["usage_perc_y"])+"%</span>"+( " + <span class=\"support-a\">"+str(item["usage_perc_a"])+"%</span> = " if float(item["usage_perc_a"]) > 0 else "" )+( "<span class=\"support-total\">"+str( "{:10.2f}".format(float(item["usage_perc_y"]) + float(item["usage_perc_a"])) )+"%</span>" if float(item["usage_perc_a"]) > 0 else "" )+"</span> "+( " ".join(["<span class=\"category\">"+category+"</span>" for category in item["categories"]]) )+"</p>"
html_browser += "</div>"
html_browser += "<div class=\"container-browser-list\">"
i = 0
for browser in browser_accepted :
browser_versions = can_i_use_file["agents"]
stat = item["stats"].get(browser)
stat_items_ordered = list()
for k in stat.keys() :
if k != "TP" :
stat_items_ordered.append(k)
if len(stat_items_ordered) >= 1 and stat_items_ordered[0] != "all" :
stat_items_ordered.sort(key=LooseVersion)
stat_items_ordered = stat_items_ordered[::-1]
html_p = "<p class=\"version-stat-item\"><span class=\"browser-name\">"+browser_name[i]+"</span> : "
j = 0
while j < len(stat_items_ordered) :
if j == 7:
break
class_name = stat.get(stat_items_ordered[j])
html_annotation_numbers = ""
requires_prefix = ""
can_be_enabled = ""
if re.search(r"\bx\b", class_name) :
requires_prefix = "x"
if re.search(r"\bd\b", class_name) :
can_be_enabled = "d"
if class_name.find("#") >= 0 :
numbers = class_name[class_name.find("#"):].strip().split(" ")
for number in numbers :
number = int(number.replace("#", ""))
html_annotation_numbers += "<span class=\"annotation-number\">"+str(number)+"</span>"
html_p += "<span class=\"version-stat "+stat.get(stat_items_ordered[j])+" \">"+( html_annotation_numbers if html_annotation_numbers else "" )+stat_items_ordered[j]+( "<span class=\"can-be-enabled\"> </span>" if can_be_enabled else "" )+( "<span class=\"requires-prefix\"> </span>" if requires_prefix else "" )+"</span> "
j = j + 1
html_p += "</p>"
html_browser += html_p
i = i + 1
html_browser += "</div>"
if item["notes_by_num"] :
html_browser += "<div>"
html_browser += "<h3>Notes</h3>"
notes_by_num = item["notes_by_num"]
notes_by_num_ordered = list()
for k in notes_by_num.keys() :
notes_by_num_ordered.append(k)
notes_by_num_ordered.sort()
i = 0
while i < len(notes_by_num_ordered) :
note = notes_by_num.get(notes_by_num_ordered[i])
html_p = "<p class=\"note\"><span class=\"annotation-number\">"+str(notes_by_num_ordered[i])+"</span>"+cgi.escape(note)+"</p>"
html_browser += html_p
i = i + 1
html_browser += "</div>"
if item["links"] :
html_browser += "<div>"
html_browser += "<h3>Links</h3>"
links = item["links"]
for link in links :
html_p = "<p class=\"link\"><a href=\""+link.get("url")+"\">"+cgi.escape(link.get("title"))+"</a></p>"
html_browser += html_p
html_browser += "</div>"
view = sublime.active_window().active_view()
can_i_use_popup_is_showing = True
view.show_popup("""
<html>
<head></head>
<body>
"""+can_i_use_css+"""
<div class=\"container-back-button\">
<a class=\"back-button\" href=\"back"""+( ",main-menu" if can_i_use_list_from_main_menu else "")+"""\">< Back</a>
<a class=\"view-on-site\" href=\"http://caniuse.com/#search="""+item["title"].replace(" ", "%20")+"""\"># View on \"Can I use\" site #</a>
</div>
<div class=\"content\">
"""+html_browser+"""
<div class=\"legend\">
<h3>Legend</h3>
<div class=\"container-legend-items\">
<span class=\"legend-item y\"> </span> = Supported
<span class=\"legend-item n\"> </span> = Not Supported
<span class=\"legend-item p a\"> </span> = Partial support
<span class=\"legend-item u\"> </span> = Support unknown
<span class=\"legend-item requires-prefix\"> </span> = Requires Prefix
<span class=\"legend-item can-be-enabled\"> </span> = Can Be Enabled
</div>
</div>
</div>
</body>
</html>""", sublime.COOPERATE_WITH_AUTO_COMPLETE, -1, 1250, 650, back_to_can_i_use_list)
class can_i_useCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
global items_found_can_i_use
global can_i_use_file
global can_i_use_list_from_main_menu
can_i_use_data = can_i_use_file.get("data")
if not can_i_use_data :
return
view = self.view
selection = view.sel()[0]
if args.get("from") != "main-menu" :
can_i_use_list_from_main_menu = False
word = view.substr(view.word(selection)).strip()
items_found_can_i_use = find_in_can_i_use(word)
sublime.active_window().show_quick_panel([item["title"] for item in items_found_can_i_use], show_pop_can_i_use)
else :
can_i_use_list_from_main_menu = True
items_found_can_i_use = find_in_can_i_use("")
sublime.active_window().show_quick_panel([item["title"] for item in items_found_can_i_use], show_pop_can_i_use)
def is_enabled(self, **args):
view = self.view
if args.get("from") == "main-menu" or javascriptCompletions.get("enable_can_i_use_menu_option") :
return True
return False
def is_visible(self, **args):
view = self.view
if args.get("from") == "main-menu" :
return True
if javascriptCompletions.get("enable_can_i_use_menu_option") :
if Util.split_string_and_find_on_multiple(view.scope_name(0), ["source.js", "text.html.basic", "source.css"]) < 0 :
return False
return True
return False
class can_i_use_hide_popupEventListener(sublime_plugin.EventListener):
def on_modified_async(self, view) :
global can_i_use_popup_is_showing
if can_i_use_popup_is_showing :
view.hide_popup()
can_i_use_popup_is_showing = False | mit |
cloudera/hue | desktop/core/ext-py/djangosaml2-0.16.11/djangosaml2/tests/auth_response.py | 4 | 5545 | # Copyright (C) 2011-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2010 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
def auth_response(session_id,
uid,
audience='http://sp.example.com/saml2/metadata/',
acs_url='http://sp.example.com/saml2/acs/',
metadata_url='http://sp.example.com/saml2/metadata/',
attribute_statements=None):
"""Generates a fresh signed authentication response
Params:
session_id: The session ID to generate the response for. Login sets an
outstanding session ID, i.e. djangosaml2 waits for a response for
that session.
uid: Unique identifier for a User (will be present as an attribute in
the answer). Ignored when attribute_statements is not ``None``.
audience: SP entityid (used when PySAML validates the response
audience).
acs_url: URL where the response has been posted back.
metadata_url: URL where the SP metadata can be queried.
attribute_statements: An alternative XML AttributeStatement to use in
lieu of the default (uid). The uid argument is ignored when
attribute_statements is not ``None``.
"""
timestamp = datetime.datetime.now() - datetime.timedelta(seconds=10)
tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
if attribute_statements is None:
attribute_statements = (
'<saml:AttributeStatement>'
'<saml:Attribute FriendlyName="uid" Name="urn:oid:0.9.2342.19200300.100.1.1" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri">'
'<saml:AttributeValue xsi:nil="true" xsi:type="xs:string">'
'%(uid)s'
'</saml:AttributeValue>'
'</saml:Attribute>'
'</saml:AttributeStatement>'
) % {'uid': uid}
saml_response_tpl = (
"<?xml version='1.0' encoding='UTF-8'?>"
'<samlp:Response xmlns:saml="urn:oasis:names:tc:SAML:2.0:assertion" xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" Destination="%(acs_url)s" ID="id-88b9f586a2a3a639f9327485cc37c40a" InResponseTo="%(session_id)s" IssueInstant="%(timestamp)s" Version="2.0">'
'<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">'
'https://idp.example.com/simplesaml/saml2/idp/metadata.php'
'</saml:Issuer>'
'<samlp:Status>'
'<samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success" />'
'</samlp:Status>'
'<saml:Assertion ID="id-093952102ceb73436e49cb91c58b0578" IssueInstant="%(timestamp)s" Version="2.0">'
'<saml:Issuer Format="urn:oasis:names:tc:SAML:2.0:nameid-format:entity">'
'https://idp.example.com/simplesaml/saml2/idp/metadata.php'
'</saml:Issuer>'
'<saml:Subject>'
'<saml:NameID Format="urn:oasis:names:tc:SAML:2.0:nameid-format:transient" NameQualifier="" SPNameQualifier="%(metadata_url)s">'
'1f87035b4c1325b296a53d92097e6b3fa36d7e30ee82e3fcb0680d60243c1f03'
'</saml:NameID>'
'<saml:SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">'
'<saml:SubjectConfirmationData InResponseTo="%(session_id)s" NotOnOrAfter="%(tomorrow)s" Recipient="%(acs_url)s" />'
'</saml:SubjectConfirmation>'
'</saml:Subject>'
'<saml:Conditions NotBefore="%(yesterday)s" NotOnOrAfter="%(tomorrow)s">'
'<saml:AudienceRestriction>'
'<saml:Audience>'
'%(audience)s'
'</saml:Audience>'
'</saml:AudienceRestriction>'
'</saml:Conditions>'
'<saml:AuthnStatement AuthnInstant="%(timestamp)s" SessionIndex="%(session_id)s">'
'<saml:AuthnContext>'
'<saml:AuthnContextClassRef>'
'urn:oasis:names:tc:SAML:2.0:ac:classes:Password'
'</saml:AuthnContextClassRef>'
'</saml:AuthnContext>'
'</saml:AuthnStatement>'
'%(attribute_statements)s'
'</saml:Assertion>'
'</samlp:Response>')
return saml_response_tpl % {
'session_id': session_id,
'audience': audience,
'acs_url': acs_url,
'metadata_url': metadata_url,
'attribute_statements': attribute_statements,
'timestamp': timestamp.strftime('%Y-%m-%dT%H:%M:%SZ'),
'tomorrow': tomorrow.strftime('%Y-%m-%dT%H:%M:%SZ'),
'yesterday': yesterday.strftime('%Y-%m-%dT%H:%M:%SZ'),
}
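# Minimal usage sketch (illustrative values, not real fixtures): generate a
# response for an outstanding session and POST it, base64-encoded, as the
# SAMLResponse parameter to the SP's ACS URL:
#
#   xml = auth_response('id-abc123', 'student')
#   saml_response = base64.b64encode(xml.encode('utf-8'))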
| apache-2.0 |
crakensio/django_training | lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
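# slider() pairs every token with its neighbours: a stream t0, t1, t2
# yields (None, t0, t1), (t0, t1, t2), (t1, t2, None), so the checks below
# can inspect the previous and next token when deciding whether a tag is
# optional.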
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
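# Example: serializing '<html><head></head><body><p>hi</p></body></html>'
# through this filter drops every start/end tag that the rules below mark
# optional, leaving essentially just the 'hi' character tokens.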
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
# is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
# not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an optgroup
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
| cc0-1.0 |
PaloAltoNetworks-BD/autofocus-client-library | autofocus/config.py | 1 | 1373 | import logging
try:
from .gsrt_config import GSRTConfig
except ImportError:
from gsrt_config import GSRTConfig
def get_logger():
""" To change log level from calling code, use something like
logging.getLogger("autofocus").setLevel(logging.DEBUG)
"""
logger = logging.getLogger("autofocus")
if not logger.handlers:
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
AF_APIKEY = None
SHOW_WARNINGS = False
SSL_VERIFY = True
SSL_CERT = None
defaults = {
"apikey": "",
"ssl_verify": 'true',
"api_base": "https://autofocus.paloaltonetworks.com/api/v1.0",
"ignore_warnings": 'false',
"http_timeout": "120"
}
gconfig = GSRTConfig("autofocus", defaults=defaults)
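# GSRTConfig reads the "autofocus" configuration section, falling back to
# the defaults above; e.g. gconfig.get("http_timeout") returns "120" unless
# the user's config overrides it.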
AF_APIKEY = gconfig.get("apikey")
SSL_VERIFY = gconfig.getboolean("ssl_verify")
HTTP_TIMEOUT = float(gconfig.get("http_timeout"))
_BASE_URL = gconfig.get("api_base")
ignore_warnings = gconfig.getboolean("ignore_warnings")
SHOW_WARNINGS = False if ignore_warnings else True
if SHOW_WARNINGS:
get_logger().setLevel(logging.WARNING)
else:
get_logger().setLevel(logging.ERROR)
try:
SSL_CERT = gconfig.get("autofocus", "ssl_cert")
except Exception:
pass
| isc |
cklein/wtforms | tests/fields.py | 1 | 35845 | from __future__ import unicode_literals
import sys
from datetime import date, datetime
from decimal import Decimal, ROUND_UP, ROUND_DOWN
from unittest import TestCase
from wtforms import validators, widgets, meta
from wtforms.fields import *
from wtforms.fields import Label, Field, SelectFieldBase, html5
from wtforms.form import Form
from wtforms.compat import text_type
from wtforms.utils import unset_value
from tests.common import DummyPostData
from wtforms.widgets import TextInput
PYTHON_VERSION = sys.version_info
class AttrDict(object):
def __init__(self, *args, **kw):
self.__dict__.update(*args, **kw)
def make_form(_name='F', **fields):
return type(str(_name), (Form, ), fields)
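# e.g. make_form(a=StringField()) builds a one-off Form subclass named "F"
# with a single field, keeping the field tests below concise.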
class DefaultsTest(TestCase):
def test(self):
expected = 42
def default_callable():
return expected
test_value = StringField(default=expected).bind(Form(), 'a')
test_value.process(None)
self.assertEqual(test_value.data, expected)
test_callable = StringField(default=default_callable).bind(Form(), 'a')
test_callable.process(None)
self.assertEqual(test_callable.data, expected)
class LabelTest(TestCase):
def test(self):
expected = """<label for="test">Caption</label>"""
label = Label('test', 'Caption')
self.assertEqual(label(), expected)
self.assertEqual(str(label), expected)
self.assertEqual(text_type(label), expected)
self.assertEqual(label.__html__(), expected)
self.assertEqual(label().__html__(), expected)
self.assertEqual(label('hello'), """<label for="test">hello</label>""")
self.assertEqual(StringField('hi').bind(Form(), 'a').label.text, 'hi')
if PYTHON_VERSION < (3,):
self.assertEqual(repr(label), "Label(u'test', u'Caption')")
else:
self.assertEqual(repr(label), "Label('test', 'Caption')")
self.assertEqual(label.__unicode__(), expected)
def test_auto_label(self):
t1 = StringField().bind(Form(), 'foo_bar')
self.assertEqual(t1.label.text, 'Foo Bar')
t2 = StringField('').bind(Form(), 'foo_bar')
self.assertEqual(t2.label.text, '')
def test_override_for(self):
label = Label('test', 'Caption')
self.assertEqual(label(for_='foo'), """<label for="foo">Caption</label>""")
self.assertEqual(label(**{'for': 'bar'}), """<label for="bar">Caption</label>""")
class FlagsTest(TestCase):
def setUp(self):
t = StringField(validators=[validators.DataRequired()]).bind(Form(), 'a')
self.flags = t.flags
def test_existing_values(self):
self.assertEqual(self.flags.required, True)
self.assertTrue('required' in self.flags)
self.assertEqual(self.flags.optional, False)
self.assertTrue('optional' not in self.flags)
def test_assignment(self):
self.assertTrue('optional' not in self.flags)
self.flags.optional = True
self.assertEqual(self.flags.optional, True)
self.assertTrue('optional' in self.flags)
def test_unset(self):
self.flags.required = False
self.assertEqual(self.flags.required, False)
self.assertTrue('required' not in self.flags)
def test_repr(self):
self.assertEqual(repr(self.flags), '<wtforms.fields.Flags: {required}>')
def test_underscore_property(self):
self.assertRaises(AttributeError, getattr, self.flags, '_foo')
self.flags._foo = 42
self.assertEqual(self.flags._foo, 42)
class UnsetValueTest(TestCase):
def test(self):
self.assertEqual(str(unset_value), '<unset value>')
self.assertEqual(repr(unset_value), '<unset value>')
self.assertEqual(bool(unset_value), False)
assert not unset_value
self.assertEqual(unset_value.__nonzero__(), False)
self.assertEqual(unset_value.__bool__(), False)
class FiltersTest(TestCase):
class F(Form):
a = StringField(default=' hello', filters=[lambda x: x.strip()])
b = StringField(default='42', filters=[int, lambda x: -x])
def test_working(self):
form = self.F()
self.assertEqual(form.a.data, 'hello')
self.assertEqual(form.b.data, -42)
assert form.validate()
def test_failure(self):
form = self.F(DummyPostData(a=[' foo bar '], b=['hi']))
self.assertEqual(form.a.data, 'foo bar')
self.assertEqual(form.b.data, 'hi')
self.assertEqual(len(form.b.process_errors), 1)
assert not form.validate()
class FieldTest(TestCase):
class F(Form):
a = StringField(default='hello', render_kw={'readonly': True, 'foo': u'bar'})
def setUp(self):
self.field = self.F().a
def test_unbound_field(self):
unbound = self.F.a
assert unbound.creation_counter != 0
assert unbound.field_class is StringField
self.assertEqual(unbound.args, ())
self.assertEqual(unbound.kwargs, {'default': 'hello', 'render_kw': {'readonly': True, 'foo': u'bar'}})
assert repr(unbound).startswith('<UnboundField(StringField')
def test_htmlstring(self):
self.assertTrue(isinstance(self.field.__html__(), widgets.HTMLString))
def test_str_coerce(self):
self.assertTrue(isinstance(str(self.field), str))
self.assertEqual(str(self.field), str(self.field()))
def test_unicode_coerce(self):
self.assertEqual(text_type(self.field), self.field())
def test_process_formdata(self):
Field.process_formdata(self.field, [42])
self.assertEqual(self.field.data, 42)
def test_meta_attribute(self):
# Can we pass in meta via _form?
form = self.F()
assert form.a.meta is form.meta
# Can we pass in meta via _meta?
form_meta = meta.DefaultMeta()
field = StringField(_name='Foo', _form=None, _meta=form_meta)
assert field.meta is form_meta
# Do we fail if both _meta and _form are None?
self.assertRaises(TypeError, StringField, _name='foo', _form=None)
def test_render_kw(self):
form = self.F()
self.assertEqual(form.a(), u'<input foo="bar" id="a" name="a" readonly type="text" value="hello">')
self.assertEqual(form.a(foo=u'baz'), u'<input foo="baz" id="a" name="a" readonly type="text" value="hello">')
self.assertEqual(
form.a(foo=u'baz', readonly=False, other='hello'),
u'<input foo="baz" id="a" name="a" other="hello" type="text" value="hello">'
)
def test_select_field_copies_choices(self):
class F(Form):
items = SelectField(choices=[])
def __init__(self, *args, **kwargs):
super(F, self).__init__(*args, **kwargs)
def add_choice(self, choice):
self.items.choices.append((choice, choice))
f1 = F()
f2 = F()
f1.add_choice('a')
f2.add_choice('b')
self.assertEqual(f1.items.choices, [('a', 'a')])
self.assertEqual(f2.items.choices, [('b', 'b')])
self.assertTrue(f1.items.choices is not f2.items.choices)
class PrePostTestField(StringField):
def pre_validate(self, form):
if self.data == "stoponly":
raise validators.StopValidation()
elif self.data.startswith("stop"):
raise validators.StopValidation("stop with message")
def post_validate(self, form, stopped):
if self.data == "p":
raise ValueError("Post")
elif stopped and self.data == "stop-post":
raise ValueError("Post-stopped")
class PrePostValidationTest(TestCase):
class F(Form):
a = PrePostTestField(validators=[validators.Length(max=1, message="too long")])
def _init_field(self, value):
form = self.F(a=value)
form.validate()
return form.a
def test_pre_stop(self):
a = self._init_field("long")
self.assertEqual(a.errors, ["too long"])
stoponly = self._init_field("stoponly")
self.assertEqual(stoponly.errors, [])
stopmessage = self._init_field("stopmessage")
self.assertEqual(stopmessage.errors, ["stop with message"])
def test_post(self):
a = self._init_field("p")
self.assertEqual(a.errors, ["Post"])
stopped = self._init_field("stop-post")
self.assertEqual(stopped.errors, ["stop with message", "Post-stopped"])
class SelectFieldTest(TestCase):
class F(Form):
a = SelectField(choices=[('a', 'hello'), ('btest', 'bye')], default='a')
b = SelectField(choices=[(1, 'Item 1'), (2, 'Item 2')], coerce=int, option_widget=widgets.TextInput())
def test_defaults(self):
form = self.F()
self.assertEqual(form.a.data, 'a')
self.assertEqual(form.b.data, None)
self.assertEqual(form.validate(), False)
self.assertEqual(form.a(), """<select id="a" name="a"><option selected value="a">hello</option><option value="btest">bye</option></select>""")
self.assertEqual(form.b(), """<select id="b" name="b"><option value="1">Item 1</option><option value="2">Item 2</option></select>""")
def test_with_data(self):
form = self.F(DummyPostData(a=['btest']))
self.assertEqual(form.a.data, 'btest')
self.assertEqual(form.a(), """<select id="a" name="a"><option value="a">hello</option><option selected value="btest">bye</option></select>""")
def test_value_coercion(self):
form = self.F(DummyPostData(b=['2']))
self.assertEqual(form.b.data, 2)
self.assertTrue(form.b.validate(form))
form = self.F(DummyPostData(b=['b']))
self.assertEqual(form.b.data, None)
self.assertFalse(form.b.validate(form))
def test_iterable_options(self):
form = self.F()
first_option = list(form.a)[0]
self.assertTrue(isinstance(first_option, form.a._Option))
self.assertEqual(
list(text_type(x) for x in form.a),
['<option selected value="a">hello</option>', '<option value="btest">bye</option>']
)
self.assertTrue(isinstance(first_option.widget, widgets.Option))
self.assertTrue(isinstance(list(form.b)[0].widget, widgets.TextInput))
self.assertEqual(first_option(disabled=True), '<option disabled selected value="a">hello</option>')
def test_default_coerce(self):
F = make_form(a=SelectField(choices=[('a', 'Foo')]))
form = F(DummyPostData(a=[]))
assert not form.validate()
self.assertEqual(form.a.data, 'None')
self.assertEqual(len(form.a.errors), 1)
self.assertEqual(form.a.errors[0], 'Not a valid choice')
class SelectMultipleFieldTest(TestCase):
class F(Form):
a = SelectMultipleField(choices=[('a', 'hello'), ('b', 'bye'), ('c', 'something')], default=('a', ))
b = SelectMultipleField(coerce=int, choices=[(1, 'A'), (2, 'B'), (3, 'C')], default=("1", "3"))
def test_defaults(self):
form = self.F()
self.assertEqual(form.a.data, ['a'])
self.assertEqual(form.b.data, [1, 3])
# Test for possible regression with null data
form.a.data = None
self.assertTrue(form.validate())
self.assertEqual(list(form.a.iter_choices()), [(v, l, False) for v, l in form.a.choices])
def test_with_data(self):
form = self.F(DummyPostData(a=['a', 'c']))
self.assertEqual(form.a.data, ['a', 'c'])
self.assertEqual(list(form.a.iter_choices()), [('a', 'hello', True), ('b', 'bye', False), ('c', 'something', True)])
self.assertEqual(form.b.data, [])
form = self.F(DummyPostData(b=['1', '2']))
self.assertEqual(form.b.data, [1, 2])
self.assertTrue(form.validate())
form = self.F(DummyPostData(b=['1', '2', '4']))
self.assertEqual(form.b.data, [1, 2, 4])
self.assertFalse(form.validate())
def test_coerce_fail(self):
form = self.F(b=['a'])
assert form.validate()
self.assertEqual(form.b.data, None)
form = self.F(DummyPostData(b=['fake']))
assert not form.validate()
self.assertEqual(form.b.data, [1, 3])
class RadioFieldTest(TestCase):
class F(Form):
a = RadioField(choices=[('a', 'hello'), ('b', 'bye')], default='a')
b = RadioField(choices=[(1, 'Item 1'), (2, 'Item 2')], coerce=int)
def test(self):
form = self.F()
self.assertEqual(form.a.data, 'a')
self.assertEqual(form.b.data, None)
self.assertEqual(form.validate(), False)
self.assertEqual(
form.a(),
(
"""<ul id="a">"""
"""<li><input checked id="a-0" name="a" type="radio" value="a"> <label for="a-0">hello</label></li>"""
"""<li><input id="a-1" name="a" type="radio" value="b"> <label for="a-1">bye</label></li></ul>"""
)
)
self.assertEqual(
form.b(),
(
"""<ul id="b">"""
"""<li><input id="b-0" name="b" type="radio" value="1"> <label for="b-0">Item 1</label></li>"""
"""<li><input id="b-1" name="b" type="radio" value="2"> <label for="b-1">Item 2</label></li></ul>"""
)
)
self.assertEqual(
[text_type(x) for x in form.a],
['<input checked id="a-0" name="a" type="radio" value="a">', '<input id="a-1" name="a" type="radio" value="b">']
)
def test_text_coercion(self):
# Regression test for text coercion scenarios where the value is a boolean.
coerce_func = lambda x: False if x == 'False' else bool(x)
F = make_form(a=RadioField(choices=[(True, 'yes'), (False, 'no')], coerce=coerce_func))
form = F()
self.assertEqual(
form.a(),
'''<ul id="a">'''
'''<li><input id="a-0" name="a" type="radio" value="True"> <label for="a-0">yes</label></li>'''
'''<li><input checked id="a-1" name="a" type="radio" value="False"> <label for="a-1">no</label></li></ul>'''
)
class StringFieldTest(TestCase):
class F(Form):
a = StringField()
def test(self):
form = self.F()
self.assertEqual(form.a.data, None)
self.assertEqual(form.a(), """<input id="a" name="a" type="text" value="">""")
form = self.F(DummyPostData(a=['hello']))
self.assertEqual(form.a.data, 'hello')
self.assertEqual(form.a(), """<input id="a" name="a" type="text" value="hello">""")
form = self.F(DummyPostData(b=['hello']))
self.assertEqual(form.a.data, '')
class HiddenFieldTest(TestCase):
class F(Form):
a = HiddenField(default="LE DEFAULT")
def test(self):
form = self.F()
self.assertEqual(form.a(), """<input id="a" name="a" type="hidden" value="LE DEFAULT">""")
self.assertTrue(form.a.flags.hidden)
class TextAreaFieldTest(TestCase):
class F(Form):
a = TextAreaField(default="LE DEFAULT")
def test(self):
form = self.F()
self.assertEqual(form.a(), """<textarea id="a" name="a">LE DEFAULT</textarea>""")
class PasswordFieldTest(TestCase):
class F(Form):
a = PasswordField(widget=widgets.PasswordInput(hide_value=False), default="LE DEFAULT")
b = PasswordField(default="Hai")
def test(self):
form = self.F()
self.assertEqual(form.a(), """<input id="a" name="a" type="password" value="LE DEFAULT">""")
self.assertEqual(form.b(), """<input id="b" name="b" type="password" value="">""")
class FileFieldTest(TestCase):
def test_file_field(self):
class F(Form):
file = FileField()
self.assertEqual(F(DummyPostData(file=['test.txt'])).file.data, 'test.txt')
self.assertEqual(F(DummyPostData()).file.data, None)
self.assertEqual(F(DummyPostData(file=['test.txt', 'multiple.txt'])).file.data, 'test.txt')
def test_multiple_file_field(self):
class F(Form):
files = MultipleFileField()
self.assertEqual(F(DummyPostData(files=['test.txt'])).files.data, ['test.txt'])
self.assertEqual(F(DummyPostData()).files.data, [])
self.assertEqual(F(DummyPostData(files=['test.txt', 'multiple.txt'])).files.data, ['test.txt', 'multiple.txt'])
def test_file_field_without_file_input(self):
class F(Form):
file = FileField(widget=TextInput())
f = F(DummyPostData(file=['test.txt']))
self.assertEqual(f.file.data, 'test.txt')
self.assertEqual(f.file(), '<input id="file" name="file" type="text">')
class IntegerFieldTest(TestCase):
class F(Form):
a = IntegerField()
b = IntegerField(default=48)
def test(self):
form = self.F(DummyPostData(a=['v'], b=['-15']))
self.assertEqual(form.a.data, None)
self.assertEqual(form.a.raw_data, ['v'])
self.assertEqual(form.a(), """<input id="a" name="a" type="text" value="v">""")
self.assertEqual(form.b.data, -15)
self.assertEqual(form.b(), """<input id="b" name="b" type="text" value="-15">""")
self.assertTrue(not form.a.validate(form))
self.assertTrue(form.b.validate(form))
form = self.F(DummyPostData(a=[], b=['']))
self.assertEqual(form.a.data, None)
self.assertEqual(form.a.raw_data, [])
self.assertEqual(form.b.data, None)
self.assertEqual(form.b.raw_data, [''])
self.assertTrue(not form.validate())
self.assertEqual(len(form.b.process_errors), 1)
self.assertEqual(len(form.b.errors), 1)
form = self.F(b=9)
self.assertEqual(form.b.data, 9)
self.assertEqual(form.a._value(), '')
self.assertEqual(form.b._value(), '9')
class DecimalFieldTest(TestCase):
def test(self):
F = make_form(a=DecimalField())
form = F(DummyPostData(a='2.1'))
self.assertEqual(form.a.data, Decimal('2.1'))
self.assertEqual(form.a._value(), '2.1')
form.a.raw_data = None
self.assertEqual(form.a._value(), '2.10')
self.assertTrue(form.validate())
form = F(DummyPostData(a='2,1'), a=Decimal(5))
self.assertEqual(form.a.data, None)
self.assertEqual(form.a.raw_data, ['2,1'])
self.assertFalse(form.validate())
form = F(DummyPostData(a='asdf'), a=Decimal('.21'))
self.assertEqual(form.a._value(), 'asdf')
assert not form.validate()
def test_quantize(self):
F = make_form(a=DecimalField(places=3, rounding=ROUND_UP), b=DecimalField(places=None))
form = F(a=Decimal('3.1415926535'))
self.assertEqual(form.a._value(), '3.142')
form.a.rounding = ROUND_DOWN
self.assertEqual(form.a._value(), '3.141')
self.assertEqual(form.b._value(), '')
form = F(a=3.14159265, b=72)
self.assertEqual(form.a._value(), '3.142')
self.assertTrue(isinstance(form.a.data, float))
self.assertEqual(form.b._value(), '72')
class FloatFieldTest(TestCase):
class F(Form):
a = FloatField()
b = FloatField(default=48.0)
def test(self):
form = self.F(DummyPostData(a=['v'], b=['-15.0']))
self.assertEqual(form.a.data, None)
self.assertEqual(form.a.raw_data, ['v'])
self.assertEqual(form.a(), """<input id="a" name="a" type="text" value="v">""")
self.assertEqual(form.b.data, -15.0)
self.assertEqual(form.b(), """<input id="b" name="b" type="text" value="-15.0">""")
self.assertFalse(form.a.validate(form))
self.assertTrue(form.b.validate(form))
form = self.F(DummyPostData(a=[], b=['']))
self.assertEqual(form.a.data, None)
self.assertEqual(form.a._value(), '')
self.assertEqual(form.b.data, None)
self.assertEqual(form.b.raw_data, [''])
self.assertFalse(form.validate())
self.assertEqual(len(form.b.process_errors), 1)
self.assertEqual(len(form.b.errors), 1)
form = self.F(b=9.0)
self.assertEqual(form.b.data, 9.0)
self.assertEqual(form.b._value(), "9.0")
class BooleanFieldTest(TestCase):
class BoringForm(Form):
bool1 = BooleanField()
bool2 = BooleanField(default=True, false_values=())
obj = AttrDict(bool1=None, bool2=True)
def test_defaults(self):
# Test with no post data to make sure defaults work
form = self.BoringForm()
self.assertEqual(form.bool1.raw_data, None)
self.assertEqual(form.bool1.data, False)
self.assertEqual(form.bool2.data, True)
def test_rendering(self):
form = self.BoringForm(DummyPostData(bool2="x"))
self.assertEqual(form.bool1(), '<input id="bool1" name="bool1" type="checkbox" value="y">')
self.assertEqual(form.bool2(), '<input checked id="bool2" name="bool2" type="checkbox" value="x">')
self.assertEqual(form.bool2.raw_data, ['x'])
def test_with_postdata(self):
form = self.BoringForm(DummyPostData(bool1=['a']))
self.assertEqual(form.bool1.raw_data, ['a'])
self.assertEqual(form.bool1.data, True)
form = self.BoringForm(DummyPostData(bool1=['false'], bool2=['false']))
self.assertEqual(form.bool1.data, False)
self.assertEqual(form.bool2.data, True)
def test_with_model_data(self):
form = self.BoringForm(obj=self.obj)
self.assertEqual(form.bool1.data, False)
self.assertEqual(form.bool1.raw_data, None)
self.assertEqual(form.bool2.data, True)
def test_with_postdata_and_model(self):
form = self.BoringForm(DummyPostData(bool1=['y']), obj=self.obj)
self.assertEqual(form.bool1.data, True)
self.assertEqual(form.bool2.data, False)
class DateFieldTest(TestCase):
class F(Form):
a = DateField()
b = DateField(format='%m/%d %Y')
def test_basic(self):
d = date(2008, 5, 7)
form = self.F(DummyPostData(a=['2008-05-07'], b=['05/07', '2008']))
self.assertEqual(form.a.data, d)
self.assertEqual(form.a._value(), '2008-05-07')
self.assertEqual(form.b.data, d)
self.assertEqual(form.b._value(), '05/07 2008')
def test_failure(self):
form = self.F(DummyPostData(a=['2008-bb-cc'], b=['hi']))
assert not form.validate()
self.assertEqual(len(form.a.process_errors), 1)
self.assertEqual(len(form.a.errors), 1)
self.assertEqual(len(form.b.errors), 1)
self.assertEqual(form.a.process_errors[0], 'Not a valid date value')
class TimeFieldTest(TestCase):
class F(Form):
a = TimeField()
b = TimeField(format='%H:%M')
def test_basic(self):
d = datetime(2008, 5, 5, 4, 30, 0, 0).time()
# Basic test with both inputs
form = self.F(DummyPostData(a=['4:30'], b=['04:30']))
self.assertEqual(form.a.data, d)
self.assertEqual(form.a(), """<input id="a" name="a" type="text" value="4:30">""")
self.assertEqual(form.b.data, d)
self.assertEqual(form.b(), """<input id="b" name="b" type="text" value="04:30">""")
self.assertTrue(form.validate())
# Test with a missing input
form = self.F(DummyPostData(a=['04']))
self.assertFalse(form.validate())
self.assertEqual(form.a.errors[0], 'Not a valid time value')
class DateTimeFieldTest(TestCase):
class F(Form):
a = DateTimeField()
b = DateTimeField(format='%Y-%m-%d %H:%M')
def test_basic(self):
d = datetime(2008, 5, 5, 4, 30, 0, 0)
# Basic test with both inputs
form = self.F(DummyPostData(a=['2008-05-05', '04:30:00'], b=['2008-05-05 04:30']))
self.assertEqual(form.a.data, d)
self.assertEqual(form.a(), """<input id="a" name="a" type="text" value="2008-05-05 04:30:00">""")
self.assertEqual(form.b.data, d)
self.assertEqual(form.b(), """<input id="b" name="b" type="text" value="2008-05-05 04:30">""")
self.assertTrue(form.validate())
# Test with a missing input
form = self.F(DummyPostData(a=['2008-05-05']))
self.assertFalse(form.validate())
self.assertEqual(form.a.errors[0], 'Not a valid datetime value')
form = self.F(a=d, b=d)
self.assertTrue(form.validate())
self.assertEqual(form.a._value(), '2008-05-05 04:30:00')
def test_microseconds(self):
d = datetime(2011, 5, 7, 3, 23, 14, 424200)
F = make_form(a=DateTimeField(format='%Y-%m-%d %H:%M:%S.%f'))
form = F(DummyPostData(a=['2011-05-07 03:23:14.4242']))
self.assertEqual(d, form.a.data)
class SubmitFieldTest(TestCase):
class F(Form):
a = SubmitField('Label')
def test(self):
self.assertEqual(self.F().a(), """<input id="a" name="a" type="submit" value="Label">""")
class FormFieldTest(TestCase):
def setUp(self):
F = make_form(
a=StringField(validators=[validators.DataRequired()]),
b=StringField(),
)
self.F1 = make_form('F1', a=FormField(F))
self.F2 = make_form('F2', a=FormField(F, separator='::'))
def test_formdata(self):
form = self.F1(DummyPostData({'a-a': ['moo']}))
self.assertEqual(form.a.form.a.name, 'a-a')
self.assertEqual(form.a['a'].data, 'moo')
self.assertEqual(form.a['b'].data, '')
self.assertTrue(form.validate())
def test_iteration(self):
self.assertEqual([x.name for x in self.F1().a], ['a-a', 'a-b'])
def test_with_obj(self):
obj = AttrDict(a=AttrDict(a='mmm'))
form = self.F1(obj=obj)
self.assertEqual(form.a.form.a.data, 'mmm')
self.assertEqual(form.a.form.b.data, None)
obj_inner = AttrDict(a=None, b='rawr')
obj2 = AttrDict(a=obj_inner)
form.populate_obj(obj2)
self.assertTrue(obj2.a is obj_inner)
self.assertEqual(obj_inner.a, 'mmm')
self.assertEqual(obj_inner.b, None)
def test_widget(self):
self.assertEqual(
self.F1().a(),
'''<table id="a">'''
'''<tr><th><label for="a-a">A</label></th><td><input id="a-a" name="a-a" type="text" value=""></td></tr>'''
'''<tr><th><label for="a-b">B</label></th><td><input id="a-b" name="a-b" type="text" value=""></td></tr>'''
'''</table>'''
)
def test_separator(self):
form = self.F2(DummyPostData({'a-a': 'fake', 'a::a': 'real'}))
self.assertEqual(form.a.a.name, 'a::a')
self.assertEqual(form.a.a.data, 'real')
self.assertTrue(form.validate())
def test_no_validators_or_filters(self):
class A(Form):
a = FormField(self.F1, validators=[validators.DataRequired()])
self.assertRaises(TypeError, A)
class B(Form):
a = FormField(self.F1, filters=[lambda x: x])
self.assertRaises(TypeError, B)
class C(Form):
a = FormField(self.F1)
def validate_a(form, field):
pass
form = C()
self.assertRaises(TypeError, form.validate)
def test_populate_missing_obj(self):
obj = AttrDict(a=None)
obj2 = AttrDict(a=AttrDict(a='mmm'))
form = self.F1()
self.assertRaises(TypeError, form.populate_obj, obj)
form.populate_obj(obj2)
class FieldListTest(TestCase):
t = StringField(validators=[validators.DataRequired()])
def test_form(self):
F = make_form(a=FieldList(self.t))
data = ['foo', 'hi', 'rawr']
a = F(a=data).a
self.assertEqual(a.entries[1].data, 'hi')
self.assertEqual(a.entries[1].name, 'a-1')
self.assertEqual(a.data, data)
self.assertEqual(len(a.entries), 3)
pdata = DummyPostData({'a-0': ['bleh'], 'a-3': ['yarg'], 'a-4': [''], 'a-7': ['mmm']})
form = F(pdata)
self.assertEqual(len(form.a.entries), 4)
self.assertEqual(form.a.data, ['bleh', 'yarg', '', 'mmm'])
self.assertFalse(form.validate())
form = F(pdata, a=data)
self.assertEqual(form.a.data, ['bleh', 'yarg', '', 'mmm'])
self.assertFalse(form.validate())
# Test for formdata precedence
pdata = DummyPostData({'a-0': ['a'], 'a-1': ['b']})
form = F(pdata, a=data)
self.assertEqual(len(form.a.entries), 2)
self.assertEqual(form.a.data, ['a', 'b'])
self.assertEqual(list(iter(form.a)), list(form.a.entries))
def test_enclosed_subform(self):
make_inner = lambda: AttrDict(a=None)
F = make_form(
a=FieldList(FormField(make_form('FChild', a=self.t), default=make_inner))
)
data = [{'a': 'hello'}]
form = F(a=data)
self.assertEqual(form.a.data, data)
self.assertTrue(form.validate())
form.a.append_entry()
self.assertEqual(form.a.data, data + [{'a': None}])
self.assertFalse(form.validate())
pdata = DummyPostData({'a-0': ['fake'], 'a-0-a': ['foo'], 'a-1-a': ['bar']})
form = F(pdata, a=data)
self.assertEqual(form.a.data, [{'a': 'foo'}, {'a': 'bar'}])
inner_obj = make_inner()
inner_list = [inner_obj]
obj = AttrDict(a=inner_list)
form.populate_obj(obj)
self.assertTrue(obj.a is not inner_list)
self.assertEqual(len(obj.a), 2)
self.assertTrue(obj.a[0] is inner_obj)
self.assertEqual(obj.a[0].a, 'foo')
self.assertEqual(obj.a[1].a, 'bar')
# Test failure on populate
obj2 = AttrDict(a=42)
self.assertRaises(TypeError, form.populate_obj, obj2)
def test_entry_management(self):
F = make_form(a=FieldList(self.t))
a = F(a=['hello', 'bye']).a
self.assertEqual(a.pop_entry().name, 'a-1')
self.assertEqual(a.data, ['hello'])
a.append_entry('orange')
self.assertEqual(a.data, ['hello', 'orange'])
self.assertEqual(a[-1].name, 'a-1')
self.assertEqual(a.pop_entry().data, 'orange')
self.assertEqual(a.pop_entry().name, 'a-0')
self.assertRaises(IndexError, a.pop_entry)
def test_min_max_entries(self):
F = make_form(a=FieldList(self.t, min_entries=1, max_entries=3))
a = F().a
self.assertEqual(len(a), 1)
self.assertEqual(a[0].data, None)
big_input = ['foo', 'flaf', 'bar', 'baz']
self.assertRaises(AssertionError, F, a=big_input)
pdata = DummyPostData(('a-%d' % i, v) for i, v in enumerate(big_input))
a = F(pdata).a
self.assertEqual(a.data, ['foo', 'flaf', 'bar'])
self.assertRaises(AssertionError, a.append_entry)
def test_validators(self):
def validator(form, field):
if field.data and field.data[0] == 'fail':
raise ValueError('fail')
elif len(field.data) > 2:
raise ValueError('too many')
F = make_form(a=FieldList(self.t, validators=[validator]))
# Case 1: length checking validators work as expected.
fdata = DummyPostData({'a-0': ['hello'], 'a-1': ['bye'], 'a-2': ['test3']})
form = F(fdata)
assert not form.validate()
self.assertEqual(form.a.errors, ['too many'])
# Case 2: checking a value within.
fdata['a-0'] = ['fail']
form = F(fdata)
assert not form.validate()
self.assertEqual(form.a.errors, ['fail'])
# Case 3: normal field validator still works
form = F(DummyPostData({'a-0': ['']}))
assert not form.validate()
self.assertEqual(form.a.errors, [['This field is required.']])
def test_no_filters(self):
my_filter = lambda x: x
self.assertRaises(TypeError, FieldList, self.t, filters=[my_filter], _form=Form(), _name='foo')
def test_process_prefilled(self):
data = ['foo', 'hi', 'rawr']
class A(object):
def __init__(self, a):
self.a = a
obj = A(data)
F = make_form(a=FieldList(self.t))
# fill form
form = F(obj=obj)
self.assertEqual(len(form.a.entries), 3)
# pretend to submit form unchanged
pdata = DummyPostData({
'a-0': ['foo'],
'a-1': ['hi'],
'a-2': ['rawr']})
form.process(formdata=pdata)
# check if data still the same
self.assertEqual(len(form.a.entries), 3)
self.assertEqual(form.a.data, data)
class MyCustomField(StringField):
def process_data(self, data):
if data == 'fail':
raise ValueError('Contrived Failure')
return super(MyCustomField, self).process_data(data)
class CustomFieldQuirksTest(TestCase):
class F(Form):
a = MyCustomField()
b = SelectFieldBase()
def test_processing_failure(self):
form = self.F(a='42')
assert form.validate()
form = self.F(a='fail')
assert not form.validate()
def test_default_impls(self):
f = self.F()
self.assertRaises(NotImplementedError, f.b.iter_choices)
class HTML5FieldsTest(TestCase):
class F(Form):
search = html5.SearchField()
telephone = html5.TelField()
url = html5.URLField()
email = html5.EmailField()
datetime = html5.DateTimeField()
date = html5.DateField()
dt_local = html5.DateTimeLocalField()
integer = html5.IntegerField()
decimal = html5.DecimalField()
int_range = html5.IntegerRangeField()
decimal_range = html5.DecimalRangeField()
def _build_value(self, key, form_input, expected_html, data=unset_value):
if data is unset_value:
data = form_input
if expected_html.startswith('type='):
expected_html = '<input id="%s" name="%s" %s value="%s">' % (key, key, expected_html, form_input)
return {
'key': key,
'form_input': form_input,
'expected_html': expected_html,
'data': data
}
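# _build_value bundles, for one field, the form key, the raw submitted
# input, the HTML the bound field is expected to render, and the coerced
# Python data, so test_simple can drive every HTML5 field from one table.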
def test_simple(self):
b = self._build_value
VALUES = (
b('search', 'search', 'type="search"'),
b('telephone', '123456789', 'type="tel"'),
b('url', 'http://wtforms.simplecodes.com/', 'type="url"'),
b('email', 'foo@bar.com', 'type="email"'),
b('datetime', '2013-09-05 00:23:42', 'type="datetime"', datetime(2013, 9, 5, 0, 23, 42)),
b('date', '2013-09-05', 'type="date"', date(2013, 9, 5)),
b('dt_local', '2013-09-05 00:23:42', 'type="datetime-local"', datetime(2013, 9, 5, 0, 23, 42)),
b('integer', '42', '<input id="integer" name="integer" step="1" type="number" value="42">', 42),
b('decimal', '43.5', '<input id="decimal" name="decimal" step="any" type="number" value="43.5">', Decimal('43.5')),
b('int_range', '4', '<input id="int_range" name="int_range" step="1" type="range" value="4">', 4),
b('decimal_range', '58', '<input id="decimal_range" name="decimal_range" step="any" type="range" value="58">', 58),
)
formdata = DummyPostData()
kw = {}
for item in VALUES:
formdata[item['key']] = item['form_input']
kw[item['key']] = item['data']
form = self.F(formdata)
for item in VALUES:
field = form[item['key']]
render_value = field()
if render_value != item['expected_html']:
tmpl = 'Field {key} render mismatch: {render_value!r} != {expected_html!r}'
raise AssertionError(tmpl.format(render_value=render_value, **item))
if field.data != item['data']:
tmpl = 'Field {key} data mismatch: {field.data!r} != {data!r}'
raise AssertionError(tmpl.format(field=field, **item))
| bsd-3-clause |
pgmillon/ansible | lib/ansible/plugins/cliconf/exos.py | 31 | 9970 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
cliconf: exos
short_description: Use exos cliconf to run command on Extreme EXOS platform
description:
- This exos plugin provides low level abstraction apis for
sending and receiving CLI commands from Extreme EXOS network devices.
version_added: "2.6"
"""
import re
import json
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.plugins.cliconf import CliconfBase
class Cliconf(CliconfBase):
def get_diff(self, candidate=None, running=None, diff_match='line', diff_ignore_lines=None, path=None, diff_replace='line'):
diff = {}
device_operations = self.get_device_operations()
option_values = self.get_option_values()
if candidate is None and device_operations['supports_generate_diff']:
raise ValueError("candidate configuration is required to generate diff")
if diff_match not in option_values['diff_match']:
raise ValueError("'match' value %s in invalid, valid values are %s" % (diff_match, ', '.join(option_values['diff_match'])))
if diff_replace not in option_values['diff_replace']:
raise ValueError("'replace' value %s in invalid, valid values are %s" % (diff_replace, ', '.join(option_values['diff_replace'])))
# prepare candidate configuration
candidate_obj = NetworkConfig(indent=1)
candidate_obj.load(candidate)
if running and diff_match != 'none' and diff_replace != 'config':
# running configuration
running_obj = NetworkConfig(indent=1, contents=running, ignore_lines=diff_ignore_lines)
configdiffobjs = candidate_obj.difference(running_obj, path=path, match=diff_match, replace=diff_replace)
else:
configdiffobjs = candidate_obj.items
diff['config_diff'] = dumps(configdiffobjs, 'commands') if configdiffobjs else ''
return diff
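# Illustrative example: with candidate 'create vlan foo' absent from the
# running config and diff_match='line', the returned dict is roughly
# {'config_diff': 'create vlan foo'}.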
def get_device_info(self):
device_info = {}
device_info['network_os'] = 'exos'
reply = self.run_commands({'command': 'show switch detail', 'output': 'text'})
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'ExtremeXOS version (\S+)', data)
if match:
device_info['network_os_version'] = match.group(1)
match = re.search(r'System Type: +(\S+)', data)
if match:
device_info['network_os_model'] = match.group(1)
match = re.search(r'SysName: +(\S+)', data)
if match:
device_info['network_os_hostname'] = match.group(1)
return device_info
def get_default_flag(self):
# The flag to modify the command to collect configuration with defaults
return 'detail'
def get_config(self, source='running', format='text', flags=None):
options_values = self.get_option_values()
if format not in options_values['format']:
raise ValueError("'format' value %s is invalid. Valid values are %s" % (format, ','.join(options_values['format'])))
lookup = {'running': 'show configuration', 'startup': 'debug cfgmgr show configuration file'}
if source not in lookup:
raise ValueError("fetching configuration from %s is not supported" % source)
cmd = {'command': lookup[source], 'output': 'text'}
if source == 'startup':
reply = self.run_commands({'command': 'show switch', 'output': 'text'})
data = to_text(reply, errors='surrogate_or_strict').strip()
match = re.search(r'Config Selected: +(\S+)\.cfg', data, re.MULTILINE)
if match:
cmd['command'] += ' ' + match.group(1)
else:
# No Startup(/Selected) Config
return {}
cmd['command'] += ' ' + ' '.join(to_list(flags))
cmd['command'] = cmd['command'].strip()
return self.run_commands(cmd)[0]
def edit_config(self, candidate=None, commit=True, replace=None, diff=False, comment=None):
resp = {}
operations = self.get_device_operations()
self.check_edit_config_capability(operations, candidate, commit, replace, comment)
results = []
requests = []
if commit:
for line in to_list(candidate):
if not isinstance(line, Mapping):
line = {'command': line}
results.append(self.send_command(**line))
requests.append(line['command'])
else:
raise ValueError('check mode is not supported')
resp['request'] = requests
resp['response'] = results
return resp
def get(self, command, prompt=None, answer=None, sendonly=False, output=None, newline=True, check_all=False):
if output:
command = self._get_command_with_output(command, output)
return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
def run_commands(self, commands=None, check_rc=True):
if commands is None:
raise ValueError("'commands' value is required")
responses = list()
for cmd in to_list(commands):
if not isinstance(cmd, Mapping):
cmd = {'command': cmd}
output = cmd.pop('output', None)
if output:
cmd['command'] = self._get_command_with_output(cmd['command'], output)
try:
out = self.send_command(**cmd)
except AnsibleConnectionFailure as e:
if check_rc is True:
raise
out = getattr(e, 'err', e)
if out is not None:
try:
out = to_text(out, errors='surrogate_or_strict').strip()
except UnicodeError:
raise ConnectionError(message=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))
if output and output == 'json':
try:
out = json.loads(out)
except ValueError:
raise ConnectionError('Response was not valid JSON, got {0}'.format(
to_text(out)
))
responses.append(out)
return responses
def get_device_operations(self):
return {
'supports_diff_replace': False, # identify if config should be merged or replaced is supported
'supports_commit': False, # identify if commit is supported by device or not
'supports_rollback': False, # identify if rollback is supported or not
'supports_defaults': True, # identify if fetching running config with default is supported
'supports_commit_comment': False,  # identify if adding comment to commit is supported or not
'supports_onbox_diff': False, # identify if on box diff capability is supported or not
'supports_generate_diff': True, # identify if diff capability is supported within plugin
'supports_multiline_delimiter': False, # identify if multiline delimiter is supported within config
'supports_diff_match': True, # identify if match is supported
'supports_diff_ignore_lines': True, # identify if ignore line in diff is supported
'supports_config_replace': False, # identify if running config replace with candidate config is supported
'supports_admin': False, # identify if admin configure mode is supported or not
'supports_commit_label': False, # identify if commit label is supported or not
'supports_replace': False
}
def get_option_values(self):
return {
'format': ['text', 'json'],
'diff_match': ['line', 'strict', 'exact', 'none'],
'diff_replace': ['line', 'block'],
'output': ['text', 'json']
}
def get_capabilities(self):
result = super(Cliconf, self).get_capabilities()
result['rpc'] += ['run_commands', 'get_default_flag', 'get_diff']
result['device_operations'] = self.get_device_operations()
result['device_info'] = self.get_device_info()
result.update(self.get_option_values())
return json.dumps(result)
def _get_command_with_output(self, command, output):
if output not in self.get_option_values().get('output'):
raise ValueError("'output' value is %s is invalid. Valid values are %s" % (output, ','.join(self.get_option_values().get('output'))))
if output == 'json' and not command.startswith('run script cli2json.py'):
cmd = 'run script cli2json.py %s' % command
else:
cmd = command
return cmd
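# e.g. _get_command_with_output('show vlan', 'json') returns
# 'run script cli2json.py show vlan', while output='text' leaves the
# command untouched.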
| gpl-3.0 |
denisenkom/django | tests/generic_views/urls.py | 6 | 12352 | from django.conf.urls import patterns, url
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView
from . import models
from . import views
urlpatterns = patterns('',
# base
#(r'^about/login-required/$',
# views.DecoratedAboutView()),
# TemplateView
(r'^template/no_template/$',
TemplateView.as_view()),
(r'^template/simple/(?P<foo>\w+)/$',
TemplateView.as_view(template_name='generic_views/about.html')),
(r'^template/custom/(?P<foo>\w+)/$',
views.CustomTemplateView.as_view(template_name='generic_views/about.html')),
(r'^template/content_type/$',
TemplateView.as_view(template_name='generic_views/robots.txt', content_type='text/plain')),
(r'^template/cached/(?P<foo>\w+)/$',
cache_page(2.0)(TemplateView.as_view(template_name='generic_views/about.html'))),
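# The cache_page(2.0) entry above wraps the rendered TemplateView in
# Django's per-view cache for two seconds, so repeated hits inside that
# window are served from the cache.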
# DetailView
(r'^detail/obj/$',
views.ObjectDetail.as_view()),
url(r'^detail/artist/(?P<pk>\d+)/$',
views.ArtistDetail.as_view(),
name="artist_detail"),
url(r'^detail/author/(?P<pk>\d+)/$',
views.AuthorDetail.as_view(),
name="author_detail"),
(r'^detail/author/bycustompk/(?P<foo>\d+)/$',
views.AuthorDetail.as_view(pk_url_kwarg='foo')),
(r'^detail/author/byslug/(?P<slug>[\w-]+)/$',
views.AuthorDetail.as_view()),
(r'^detail/author/bycustomslug/(?P<foo>[\w-]+)/$',
views.AuthorDetail.as_view(slug_url_kwarg='foo')),
(r'^detail/author/(?P<pk>\d+)/template_name_suffix/$',
views.AuthorDetail.as_view(template_name_suffix='_view')),
(r'^detail/author/(?P<pk>\d+)/template_name/$',
views.AuthorDetail.as_view(template_name='generic_views/about.html')),
(r'^detail/author/(?P<pk>\d+)/context_object_name/$',
views.AuthorDetail.as_view(context_object_name='thingy')),
(r'^detail/author/(?P<pk>\d+)/dupe_context_object_name/$',
views.AuthorDetail.as_view(context_object_name='object')),
(r'^detail/page/(?P<pk>\d+)/field/$',
views.PageDetail.as_view()),
(r'^detail/author/invalid/url/$',
views.AuthorDetail.as_view()),
(r'^detail/author/invalid/qs/$',
views.AuthorDetail.as_view(queryset=None)),
(r'^detail/nonmodel/1/$',
views.NonModelDetail.as_view()),
# FormView
(r'^contact/$',
views.ContactView.as_view()),
# Create/UpdateView
(r'^edit/artists/create/$',
views.ArtistCreate.as_view()),
(r'^edit/artists/(?P<pk>\d+)/update/$',
views.ArtistUpdate.as_view()),
(r'^edit/authors/create/naive/$',
views.NaiveAuthorCreate.as_view()),
(r'^edit/authors/create/redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/authors/create/')),
(r'^edit/authors/create/interpolate_redirect/$',
views.NaiveAuthorCreate.as_view(success_url='/edit/author/%(id)d/update/')),
(r'^edit/authors/create/restricted/$',
views.AuthorCreateRestricted.as_view()),
(r'^edit/authors/create/$',
views.AuthorCreate.as_view()),
(r'^edit/authors/create/special/$',
views.SpecializedAuthorCreate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/naive/$',
views.NaiveAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/authors/create/')),
(r'^edit/author/(?P<pk>\d+)/update/interpolate_redirect/$',
views.NaiveAuthorUpdate.as_view(success_url='/edit/author/%(id)d/update/')),
(r'^edit/author/(?P<pk>\d+)/update/$',
views.AuthorUpdate.as_view()),
(r'^edit/author/update/$',
views.OneAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/update/special/$',
views.SpecializedAuthorUpdate.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/naive/$',
views.NaiveAuthorDelete.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/')),
(r'^edit/author/(?P<pk>\d+)/delete/interpolate_redirect/$',
views.NaiveAuthorDelete.as_view(success_url='/edit/authors/create/?deleted=%(id)s')),
(r'^edit/author/(?P<pk>\d+)/delete/$',
views.AuthorDelete.as_view()),
(r'^edit/author/(?P<pk>\d+)/delete/special/$',
views.SpecializedAuthorDelete.as_view()),
# ArchiveIndexView
(r'^dates/books/$',
views.BookArchive.as_view()),
(r'^dates/books/context_object_name/$',
views.BookArchive.as_view(context_object_name='thingies')),
(r'^dates/books/allow_empty/$',
views.BookArchive.as_view(allow_empty=True)),
(r'^dates/books/template_name/$',
views.BookArchive.as_view(template_name='generic_views/list.html')),
(r'^dates/books/template_name_suffix/$',
views.BookArchive.as_view(template_name_suffix='_detail')),
(r'^dates/books/invalid/$',
views.BookArchive.as_view(queryset=None)),
(r'^dates/books/paginated/$',
views.BookArchive.as_view(paginate_by=10)),
(r'^dates/books/reverse/$',
views.BookArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
(r'^dates/books/by_month/$',
views.BookArchive.as_view(date_list_period='month')),
(r'^dates/booksignings/$',
views.BookSigningArchive.as_view()),
# ListView
(r'^list/dict/$',
views.DictList.as_view()),
(r'^list/dict/paginated/$',
views.DictList.as_view(paginate_by=1)),
url(r'^list/artists/$',
views.ArtistList.as_view(),
name="artists_list"),
url(r'^list/authors/$',
views.AuthorList.as_view(),
name="authors_list"),
(r'^list/authors/paginated/$',
views.AuthorList.as_view(paginate_by=30)),
(r'^list/authors/paginated/(?P<page>\d+)/$',
views.AuthorList.as_view(paginate_by=30)),
(r'^list/authors/paginated-orphaned/$',
views.AuthorList.as_view(paginate_by=30, paginate_orphans=2)),
(r'^list/authors/notempty/$',
views.AuthorList.as_view(allow_empty=False)),
(r'^list/authors/notempty/paginated/$',
views.AuthorList.as_view(allow_empty=False, paginate_by=2)),
(r'^list/authors/template_name/$',
views.AuthorList.as_view(template_name='generic_views/list.html')),
(r'^list/authors/template_name_suffix/$',
views.AuthorList.as_view(template_name_suffix='_objects')),
(r'^list/authors/context_object_name/$',
views.AuthorList.as_view(context_object_name='author_list')),
(r'^list/authors/dupe_context_object_name/$',
views.AuthorList.as_view(context_object_name='object_list')),
(r'^list/authors/invalid/$',
views.AuthorList.as_view(queryset=None)),
(r'^list/authors/paginated/custom_class/$',
views.AuthorList.as_view(paginate_by=5, paginator_class=views.CustomPaginator)),
(r'^list/authors/paginated/custom_page_kwarg/$',
views.AuthorList.as_view(paginate_by=30, page_kwarg='pagina')),
(r'^list/authors/paginated/custom_constructor/$',
views.AuthorListCustomPaginator.as_view()),
# YearArchiveView
    # Mixing keyword and positional captures below is intentional; the views
# ought to be able to accept either.
(r'^dates/books/(?P<year>\d{4})/$',
views.BookYearArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/make_object_list/$',
views.BookYearArchive.as_view(make_object_list=True)),
(r'^dates/books/(?P<year>\d{4})/allow_empty/$',
views.BookYearArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/allow_future/$',
views.BookYearArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/paginated/$',
views.BookYearArchive.as_view(make_object_list=True, paginate_by=30)),
(r'^dates/books/no_year/$',
views.BookYearArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/reverse/$',
views.BookYearArchive.as_view(queryset=models.Book.objects.order_by('pubdate'))),
(r'^dates/booksignings/(?P<year>\d{4})/$',
views.BookSigningYearArchive.as_view()),
# MonthArchiveView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookMonthArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/$',
views.BookMonthArchive.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_empty/$',
views.BookMonthArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/allow_future/$',
views.BookMonthArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/paginated/$',
views.BookMonthArchive.as_view(paginate_by=30)),
(r'^dates/books/(?P<year>\d{4})/no_month/$',
views.BookMonthArchive.as_view()),
(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
views.BookSigningMonthArchive.as_view()),
# WeekArchiveView
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookWeekArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_empty/$',
views.BookWeekArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/allow_future/$',
views.BookWeekArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/paginated/$',
views.BookWeekArchive.as_view(paginate_by=30)),
(r'^dates/books/(?P<year>\d{4})/week/no_week/$',
views.BookWeekArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/week/(?P<week>\d{1,2})/monday/$',
views.BookWeekArchive.as_view(week_format='%W')),
(r'^dates/booksignings/(?P<year>\d{4})/week/(?P<week>\d{1,2})/$',
views.BookSigningWeekArchive.as_view()),
# DayArchiveView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$',
views.BookDayArchive.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty/$',
views.BookDayArchive.as_view(allow_empty=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_future/$',
views.BookDayArchive.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/allow_empty_and_future/$',
views.BookDayArchive.as_view(allow_empty=True, allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/paginated/$',
views.BookDayArchive.as_view(paginate_by=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/no_day/$',
views.BookDayArchive.as_view()),
(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/$',
views.BookSigningDayArchive.as_view()),
# TodayArchiveView
(r'^dates/books/today/$',
views.BookTodayArchive.as_view()),
(r'^dates/books/today/allow_empty/$',
views.BookTodayArchive.as_view(allow_empty=True)),
(r'^dates/booksignings/today/$',
views.BookSigningTodayArchive.as_view()),
# DateDetailView
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetail.as_view(month_format='%m')),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/allow_future/$',
views.BookDetail.as_view(allow_future=True)),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/nopk/$',
views.BookDetail.as_view()),
(r'^dates/books/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/byslug/(?P<slug>[\w-]+)/$',
views.BookDetail.as_view()),
(r'^dates/books/get_object_custom_queryset/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookDetailGetObjectCustomQueryset.as_view()),
(r'^dates/booksignings/(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\d{1,2})/(?P<pk>\d+)/$',
views.BookSigningDetail.as_view()),
# Useful for testing redirects
(r'^accounts/login/$', 'django.contrib.auth.views.login')
)
| bsd-3-clause |
loco-odoo/localizacion_co | doc/_themes/odoodoc/sphinx_monkeypatch.py | 22 | 3300 | # -*- coding: utf-8 -*-
import sphinx.roles
import sphinx.environment
from sphinx.writers.html import HTMLTranslator
from docutils.writers.html4css1 import HTMLTranslator as DocutilsTranslator
def patch():
# navify toctree (oh god)
@monkey(sphinx.environment.BuildEnvironment)
def resolve_toctree(old_resolve, self, *args, **kwargs):
""" If navbar, bootstrapify TOC to yield a navbar
"""
navbar = kwargs.pop('navbar', None)
toc = old_resolve(self, *args, **kwargs)
if toc is None:
return None
navbarify(toc[0], navbar=navbar)
return toc
# monkeypatch visit_table to remove border and add .table
HTMLTranslator.visit_table = visit_table
# disable colspec crap
HTMLTranslator.write_colspecs = lambda self: None
# copy data- attributes straight from source to dest
HTMLTranslator.starttag = starttag_data
def navbarify(node, navbar=None):
"""
:param node: toctree node to navbarify
:param navbar: Whether this toctree is a 'main' navbar, a 'side' navbar or
not a navbar at all
"""
if navbar == 'side':
for n in node.traverse():
if n.tagname == 'bullet_list':
n['classes'].append('nav')
elif navbar == 'main':
# add classes to just toplevel
node['classes'].extend(['nav', 'navbar-nav', 'navbar-right'])
for list_item in node.children:
# bullet_list
# list_item
# compact_paragraph
# reference
# bullet_list
# list_item
# compact_paragraph
# reference
# no bullet_list.list_item -> don't dropdownify
if not list_item.children[1].children:
return
list_item['classes'].append('dropdown')
# list_item.compact_paragraph.reference
link = list_item.children[0].children[0]
link['classes'].append('dropdown-toggle')
link.attributes['data-toggle'] = 'dropdown'
# list_item.bullet_list
list_item.children[1]['classes'].append('dropdown-menu')
def visit_table(self, node):
"""
* remove border
* add table class
"""
self._table_row_index = 0
self.context.append(self.compact_p)
self.compact_p = True
classes = {self.settings.table_style}
node_classes = node.get('classes', [])
if 'no-table' in node_classes: node_classes.remove('no-table')
else: classes.add('table')
self.body.append(self.starttag(node, 'table', CLASS=' '.join(classes).strip()))
def starttag_data(self, node, tagname, suffix='\n', empty=False, **attributes):
attributes.update(
(k, v) for k, v in node.attributes.iteritems()
if k.startswith('data-')
)
# oh dear
return DocutilsTranslator.starttag(
self, node, tagname, suffix=suffix, empty=empty, **attributes)
class monkey(object):
def __init__(self, obj):
self.obj = obj
def __call__(self, fn):
name = fn.__name__
old = getattr(self.obj, name)
setattr(self.obj, name, lambda self_, *args, **kwargs: \
fn(old, self_, *args, **kwargs))
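# Illustrative sketch (not part of the original module): ``monkey`` replaces a
# method in place and hands the previous implementation to the wrapper as its
# first argument. ``SomeClass`` and ``render`` below are hypothetical names.
#
#   @monkey(SomeClass)
#   def render(old_render, self, *args, **kwargs):
#       out = old_render(self, *args, **kwargs)
#       return out + '<!-- patched -->'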
| agpl-3.0 |
xdevelsistemas/taiga-back-community | taiga/base/utils/iterators.py | 2 | 1996 | # -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2016 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from functools import wraps, partial
from django.core.paginator import Paginator
def as_tuple(function=None, *, remove_nulls=False):
if function is None:
return partial(as_tuple, remove_nulls=remove_nulls)
    @wraps(function)
    def _decorator(*args, **kwargs):
        result = function(*args, **kwargs)
        if remove_nulls:
            result = (x for x in result if x is not None)
        return tuple(result)
    return _decorator
def as_dict(function):
@wraps(function)
def _decorator(*args, **kwargs):
return dict(function(*args, **kwargs))
return _decorator
def split_by_n(seq:str, n:int):
"""
A generator to divide a sequence into chunks of n units.
"""
while seq:
yield seq[:n]
seq = seq[n:]
def iter_queryset(queryset, itersize:int=20):
"""
Util function for iterate in more efficient way
all queryset.
"""
paginator = Paginator(queryset, itersize)
for page_num in paginator.page_range:
page = paginator.page(page_num)
for element in page.object_list:
yield element
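# Illustrative sketches (hypothetical usage, not part of the original module):
#
#   >>> list(split_by_n('abcdefg', 3))
#   ['abc', 'def', 'g']
#
#   @as_tuple(remove_nulls=True)
#   def values():  # hypothetical helper
#       return [1, None, 2]
#   values()  # -> (1, 2)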
| agpl-3.0 |
fintler/vbox | src/libs/xpcom18a4/python/vboxxpcom.py | 4 | 1901 | """
Copyright (C) 2008-2012 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
"""
import xpcom
import sys
import platform
# this code works around a somewhat unlucky feature of Python, which searches
# for binaries in the same place as platform-independent modules, while the
# rest of the Python bindings expect _xpcom to live inside the xpcom module
candidates = ['VBoxPython' + str(sys.version_info[0]) + '_' + str(sys.version_info[1]),
'VBoxPython' + str(sys.version_info[0]),
'VBoxPython']
if platform.system() == 'Darwin':
# On Darwin (aka Mac OS X) we know exactly where things are in a normal
# VirtualBox installation. Also, there are two versions of python there
# (2.3.x and 2.5.x) depending on whether the os is striped or spotty, so
# we have to choose the right module to load.
#
# XXX: This needs to be adjusted for OSE builds. A more general solution would
    # be to sed the file during install and inject the VBOX_PATH_APP_PRIVATE_ARCH
# and VBOX_PATH_SHARED_LIBS when these are set.
sys.path.append('/Applications/VirtualBox.app/Contents/MacOS')
cglue = None
for m in candidates:
try:
cglue = __import__(m)
break
except:
pass
if platform.system() == 'Darwin':
sys.path.remove('/Applications/VirtualBox.app/Contents/MacOS')
if cglue is None:
raise Exception, "Cannot find VBoxPython module"
sys.modules['xpcom._xpcom'] = cglue
xpcom._xpcom = cglue
| gpl-2.0 |
LanderU/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/gentools.py | 214 | 6644 | #! /usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Library for supporting message and service generation for all ROS
client libraries. This is mainly responsible for calculating the
md5sums and message definitions of classes.
"""
# NOTE: this should not contain any rospy-specific code. The rospy
# generator library is rospy.genpy.
import sys
import hashlib
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . import msgs
from .msgs import InvalidMsgSpec, MsgSpec, bare_msg_type, is_builtin
from .msg_loader import load_depends
from .srvs import SrvSpec
from . import names
from . import base
def compute_md5_text(msg_context, spec):
"""
    Compute the text used for md5 calculation. The MD5 spec states that we
    remove comments and non-meaningful whitespace. We also strip
    package names from type names. For convenience's sake, constants are
reordered ahead of other declarations, in the order that they were
originally defined.
:returns: text for ROS MD5-processing, ``str``
"""
package = spec.package
buff = StringIO()
for c in spec.constants:
buff.write("%s %s=%s\n"%(c.type, c.name, c.val_text))
for type_, name in zip(spec.types, spec.names):
msg_type = bare_msg_type(type_)
# md5 spec strips package names
if is_builtin(msg_type):
buff.write("%s %s\n"%(type_, name))
else:
# recursively generate md5 for subtype. have to build up
# dependency representation for subtype in order to
# generate md5
sub_pkg, _ = names.package_resource_name(msg_type)
sub_pkg = sub_pkg or package
sub_spec = msg_context.get_registered(msg_type)
sub_md5 = compute_md5(msg_context, sub_spec)
buff.write("%s %s\n"%(sub_md5, name))
return buff.getvalue().strip() # remove trailing new line
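# Illustrative note (not from the original source): for a message containing a
# constant and a single builtin field, e.g. ``int8 X=1`` and ``int32 count``,
# the text hashed above is simply "int8 X=1\nint32 count"; for a non-builtin
# field, its type is replaced by the md5 of the embedded message definition.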
def _compute_hash(msg_context, spec, hash):
"""
subroutine of compute_md5()
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute hash for.
:param hash: hash instance
"""
# accumulate the hash
# - root file
if isinstance(spec, MsgSpec):
hash.update(compute_md5_text(msg_context, spec).encode())
elif isinstance(spec, SrvSpec):
hash.update(compute_md5_text(msg_context, spec.request).encode())
hash.update(compute_md5_text(msg_context, spec.response).encode())
else:
raise Exception("[%s] is not a message or service"%spec)
return hash.hexdigest()
def compute_md5(msg_context, spec):
"""
Compute md5 hash for message/service
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute md5 for.
:returns: md5 hash, ``str``
"""
return _compute_hash(msg_context, spec, hashlib.md5())
## alias
compute_md5_v2 = compute_md5
def _unique_deps(dep_list):
uniques = []
for d in dep_list:
if d not in uniques:
uniques.append(d)
return uniques
def compute_full_text(msg_context, spec):
"""
Compute full text of message/service, including text of embedded
types. The text of the main msg/srv is listed first. Embedded
msg/srv files are denoted first by an 80-character '=' separator,
followed by a type declaration line,'MSG: pkg/type', followed by
the text of the embedded type.
:param msg_context: :class:`MsgContext` instance to load dependencies into/from.
:param spec: :class:`MsgSpec` to compute full text for.
:returns: concatenated text for msg/srv file and embedded msg/srv types, ``str``
"""
buff = StringIO()
sep = '='*80+'\n'
# write the text of the top-level type
buff.write(spec.text)
buff.write('\n')
# append the text of the dependencies (embedded types). Can't use set() as we have to preserve order.
for d in _unique_deps(msg_context.get_all_depends(spec.full_name)):
buff.write(sep)
buff.write("MSG: %s\n"%d)
buff.write(msg_context.get_registered(d).text)
buff.write('\n')
# #1168: remove the trailing \n separator that is added by the concatenation logic
return buff.getvalue()[:-1]
def compute_full_type_name(package_name, file_name):
"""
Compute the full type name of message/service 'pkg/type'.
:param package_name: name of package file is in, ``str``
:file_name: name of the msg og srv file, ``str``
:returns: typename in format 'pkg/type'
:raises: :exc:`MsgGenerationException` if file_name ends with an unknown file extension
"""
# strip extension
for ext in (base.EXT_MSG, base.EXT_SRV):
if file_name.endswith(ext):
short_name = file_name[:-len(ext)]
break
else:
raise base.MsgGenerationException("Processing file: '%s' - unknown file extension"% (file_name))
return "%s/%s"%(package_name, short_name)
| gpl-3.0 |
HugoMMRabson/fonsa | src/my/oledhat/interface.py | 1 | 12780 | '''
my.oledhat.interface
This library contains the high-level functions for drawing on the MyOled class (the means of publishing
new images onto the NanoHat OLED device).
Key functions:-
prompt_for_keyboard_text display keyboard; let user enter a phrase; return the phrase
choose_from_list display a list; let the user choose an item; return the item
draw_simple_message_and_buttons_screen ;)
Created on Jan 14, 2018
@author: johnrabsonjr
from testme import *
app = QApplication(sys.argv)
w = NanohateOledSimulator(None)
from my.oledhat.interface import *
prompt_for_keyboard_text(title='Choose one')
#draw_keyboard(0, (0,1), 10, 63)
'''
import dbm
import os
import time
from my.globals.consts import INFERNALPORPOISES_PASSWD_FNAME, RUNNING_ON_MAC
from my.oledhat.classes import MyOled, ALIGN_CENTER, ALIGN_LEFT
from my.v2and3 import getkeypress, is_a_keypress_in_queue
OUR_PWDCT_FNAME = '/%s/fonsa/pwdct' % ('tmp' if RUNNING_ON_MAC else 'etc')
OUR_AP_INFO_FNAME = '/%s/fonsa/apinfo' % ('tmp' if RUNNING_ON_MAC else 'etc')
os.system('p=%s; mkdir -p $p; chmod 700 $p; rmdir $p' % OUR_PWDCT_FNAME)
try:
OUR_PW_DCT = dbm.open(OUR_PWDCT_FNAME, 'c')
except Exception as e:
print("Failed to get OUR_PW_DCT. => %s" % str(e))
OUR_PW_DCT = dbm.open(OUR_PWDCT_FNAME, 'n')
if os.path.exists(INFERNALPORPOISES_PASSWD_FNAME):
OUR_PW_DCT['From the Window'] = open(INFERNALPORPOISES_PASSWD_FNAME,
'r').read().strip('\n')
KEYBOARDS_LST = (((
'a',
'b',
'c',
'd',
'e',
'f',
'<=',
'=>',
), (
'g',
'h',
'i',
'j',
'k',
'l',
'_delete',
), (
'm',
'n',
'o',
'p',
'q',
'r',
'_space',
), ('s', 't', 'u', 'v', 'w', 'x', '_enter'),
('y', 'z', "'", '"', '!', '?', '_cancel')), ((
'A',
'B',
'C',
'D',
'E',
'F',
'<=',
'=>',
), (
'G',
'H',
'I',
'J',
'K',
'L',
'_delete',
), (
'M',
'N',
'O',
'P',
'Q',
'R',
'_space',
), ('S', 'T', 'U', 'V', 'W', 'X',
'_enter'), ('Y', 'Z', ';', ':', '{', '}', '_cancel')), ((
'1',
'2',
'3',
'+',
'@',
'#',
'<=',
'=>',
), (
'4',
'5',
'6',
'$',
'%',
'^',
'_delete',
), (
'7',
'8',
'9',
'-',
'_',
'=',
'_cancel1',
), ('*', '0', '~', '&', '(', ')',
'_enter'), ('/', '\\', '[', ']', '<', '>',
'_cancel')))
def add_keyboard_button(coords, label, special, inverted):
if inverted:
tx, ty, bx, by = coords
tx -= 2 if len(label) > 2 else 0.0
bx += 1 if len(label) > 2 else 0.0
# ty += 0.5
by += 2
MyOled.draw.rectangle((tx, ty, bx, by), fill=128, outline=128)
MyOled.add_text(
coords,
label,
        bold=inverted,
        fontsize=12 if inverted and len(label) == 1 else 10,
fill=0
if inverted else 128) # , fontsize, fill, horizalign, vertalign)
def draw_keyboard(current_keyboard_number, current_position, vert_start,
vert_end):
cx, cy = current_position
xstep = (MyOled.width - 1) / 8
ystep = (vert_end - vert_start) / 5
MyOled.draw.rectangle((0, vert_start, MyOled.width - 1, vert_end), fill=0)
for rowno in range(0, 5):
column_len = len(KEYBOARDS_LST[current_keyboard_number][rowno])
for colno in range(0, column_len):
str_to_show = KEYBOARDS_LST[current_keyboard_number][rowno][colno]
tx = xstep * colno
ty = ystep * rowno + vert_start
bx = tx + xstep - 1
by = ty + ystep
this_is_a_special_button = False
if str_to_show[0] == '_':
str_to_show = str_to_show[1:]
# if colno == column_len - 1 and column_len < 8:
this_is_a_special_button = True
bx += xstep
# print(str((tx, ty, bx, by)))
add_keyboard_button(
(tx, ty, bx, by),
str_to_show,
special=this_is_a_special_button,
inverted=True if rowno == cy and colno == cx else False)
# def draw_network_connections_chooser_screen(list_of_network_connections):
# draw_simple_message_and_buttons_screen('Hotspot chooser', 'up', 'down', 'choose')
def prompt_for_keyboard_text(prompt='PW:',
promptfontsize=10,
vert_start=10,
vert_end=MyOled.height_above_buttons - 1):
'''Draw a keyboard. Let the user peck out a series of characters. Then let the user accept/reject the string.
'''
current_keyboard_number = 0
current_x = len(KEYBOARDS_LST[0][-1]) - 1
current_y = len(KEYBOARDS_LST[0]) - 1
outval = ''
enter_pushed = False
cancel_pushed = False
k = None
while not enter_pushed and not cancel_pushed:
# MyOled.wipe()
MyOled.add_text(
(0, 0, MyOled.width - 1, vert_start - 1),
text=prompt + outval,
fontsize=promptfontsize,
horizalign=ALIGN_LEFT,
wipe_first=True)
draw_keyboard(current_keyboard_number, (current_x, current_y),
vert_start, vert_end)
if k is None:
MyOled.add_buttons_labels(('down', 'right', 'choose'))
else:
MyOled.refresh_buttons(k)
if not is_a_keypress_in_queue():
MyOled.refresh_buttons(None)
k = getkeypress()
if k == 1:
current_y = (current_y + 1) % len(
KEYBOARDS_LST[current_keyboard_number])
if current_x >= len(
KEYBOARDS_LST[current_keyboard_number][current_y]):
current_x = len(
KEYBOARDS_LST[current_keyboard_number][current_y]) - 1
elif k == 2:
current_x = (current_x + 1) % len(
KEYBOARDS_LST[current_keyboard_number][current_y])
else:
s = KEYBOARDS_LST[current_keyboard_number][current_y][current_x]
if s == '_enter':
enter_pushed = True
elif s == '_cancel':
outval = None
cancel_pushed = True
elif s == '_space':
outval += ' '
elif s == '_delete':
outval = outval[0:max(0, len(outval) - 1)]
elif s == '<=':
current_keyboard_number = (current_keyboard_number + len(
KEYBOARDS_LST) - 1) % len(KEYBOARDS_LST)
elif s == '=>':
current_keyboard_number = (
current_keyboard_number + 1) % len(KEYBOARDS_LST)
else:
outval += s
return outval
def choose_from_list(my_lst,
title='Choose one',
vert_start=11,
vert_end=MyOled.height_above_buttons - 2):
itemno = 0
total_items_per_windowful = 4
retval = None
mylabels = ('up', 'down', 'choose')
vertslider_width_in_pixels = 6
fontsize = 10
# MyOled.add_buttons_labels(mylabels)
top_itemno_on_display = 0
tx = 0
ty = vert_start
bx = MyOled.width - 1
by = vert_end
lineheight = (by - ty) / total_items_per_windowful
MyOled.add_buttons_labels(mylabels)
MyOled.refresh()
while retval is None:
MyOled.wipe()
# print('%d, %d, %d, %d' % (tx, ty, bx, by))
MyOled.add_text((tx, 0, bx, ty - 1), text=title, fontsize=14)
bottom_itemno_on_display = min(
top_itemno_on_display + total_items_per_windowful - 1,
len(my_lst) - 1)
draw_windowful_of_choosable_items(
top_itemno_on_display, bottom_itemno_on_display, fontsize, my_lst,
itemno, lineheight, bx, tx, ty, vertslider_width_in_pixels)
if len(my_lst) > total_items_per_windowful:
draw_slider_for_choosable_items(my_lst, itemno, lineheight, bx, ty,
vertslider_width_in_pixels,
total_items_per_windowful)
MyOled.refresh()
k = getkeypress()
MyOled.refresh_buttons(buttonpushed=k)
# MyOled.add_buttons_labels(mylabels, (k == 1, k == 2, k == 3))
if k == 1:
if itemno > 0:
itemno -= 1
if itemno < top_itemno_on_display:
top_itemno_on_display -= 1
elif k == 2:
if itemno < len(my_lst) - 1:
itemno += 1
if itemno > bottom_itemno_on_display:
top_itemno_on_display += 1
else:
retval = my_lst[itemno]
MyOled.refresh_buttons(
buttonpushed=None
) # No button pushed! Yay! That means we'll refresh the buttons and show NO SELECTION.
return retval
def draw_windowful_of_choosable_items(
top_itemno_on_display, bottom_itemno_on_display, fontsize, my_lst,
itemno, lineheight, bx, tx, ty, vertslider_width_in_pixels):
for this_line_on_display in range(top_itemno_on_display,
bottom_itemno_on_display + 1):
current_entry = my_lst[this_line_on_display]
text_to_display = current_entry # '%d) %s' % (lino, current_entry)
y_delta = this_line_on_display - top_itemno_on_display
if this_line_on_display == itemno:
MyOled.draw.rectangle(
(tx, ty + y_delta * lineheight,
bx - vertslider_width_in_pixels - 2,
ty + (y_delta + 1) * lineheight),
fill=255,
outline=255)
MyOled.add_text(
(tx, ty + y_delta * lineheight, bx,
ty + (y_delta + 1) * lineheight),
text=text_to_display,
fontsize=fontsize,
fill=0 if this_line_on_display == itemno else 128,
horizalign=ALIGN_LEFT,
vertalign=ALIGN_CENTER)
def draw_slider_for_choosable_items(my_lst, itemno, lineheight, bx, ty,
vertslider_width_in_pixels,
total_items_per_windowful):
# Draw slider
vertslider_range_in_pixels = lineheight * total_items_per_windowful
cliqheight_in_pixels = vertslider_range_in_pixels / len(my_lst)
start_of_scrollmarker = ty + cliqheight_in_pixels * itemno
end_of_scrollmarker = ty + cliqheight_in_pixels * (itemno + 1)
# print('Slider vert from %d to %d' % (start_of_scrollmarker, end_of_scrollmarker))
MyOled.draw.rectangle(
(bx - vertslider_width_in_pixels, ty, bx,
ty + lineheight * total_items_per_windowful),
fill=255,
outline=255)
MyOled.draw.rectangle(
(bx - vertslider_width_in_pixels, start_of_scrollmarker, bx,
end_of_scrollmarker),
fill=0,
outline=255)
def draw_this_test_image():
# MyOled.image = Image.new('1', (MyOled.width, MyOled.height))
text = time.strftime("%A")
MyOled.draw.text((2, 2), text, font=MyOled.font14, fill=255)
text = time.strftime("%e %b %Y")
MyOled.draw.text((2, 18), text, font=MyOled.font14, fill=255)
# MyOled.add_buttons_labels(('hi', 'there', 'shutd'))
MyOled.refresh()
# text = time.strftime("%X")
# draw.text((2, 40), text, font=MyOled.fontb24, fill=255)
# def retrieve_stored_hotspot_pw(hotspot_name):
# '''
# '''
# global OUR_PW_DCT
# print('QQQ retrieve password for %s if we have it' % hotspot_name)
# if hotspot_name == ETHERNET_QUASIHOTSPOT_ENTRY:
# return None
# elif hotspot_name in OUR_PW_DCT.keys():
# return OUR_PW_DCT[hotspot_name]
# else:
# return None
def draw_simple_message_and_buttons_screen(textmsg, labelA, labelB, labelC):
print('Simple page - %s' % textmsg)
MyOled.draw.text((2, 2), textmsg, font=MyOled.fontb14, fill=255)
MyOled.add_buttons_labels((labelA, labelB, labelC))
def does_hotspot_require_password(hotspot_name):
if hotspot_name in ('From the Window', 'To the Walls',
'Orange Julius Caesar', 'Tangerine Idi Amin'
): # FIXME: does hotspot require password? Find out!
res = True
else:
res = False
return res
| gpl-3.0 |
giserh/pysparkling | pysparkling/rdd.py | 1 | 44048 | """
Provides a Python implementation of RDDs.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import io
import sys
import copy
import pickle
import random
import logging
import functools
import itertools
import subprocess
from collections import defaultdict
from . import utils
from .fileio import File, TextFile
from .stat_counter import StatCounter
from .cache_manager import CacheManager
from .exceptions import FileAlreadyExistsException
try:
from itertools import izip as zip # Python 2
except ImportError:
pass # Python 3
log = logging.getLogger(__name__)
class RDD(object):
"""
In Spark's original form, RDDs are Resilient, Distributed Datasets.
    This class reimplements the same interface with the goal of being
    fast on small data, at the cost of resilience and distribution.
:param partitions:
A list of instances of :class:`Partition`.
:param ctx:
An instance of the applicable :class:`Context`.
"""
def __init__(self, partitions, ctx):
self._p = partitions
self.context = ctx
self._name = None
self._rdd_id = ctx.newRddId()
def __getstate__(self):
r = dict((k, v) for k, v in self.__dict__.items())
r['_p'] = list(self.partitions())
r['context'] = None
return r
def compute(self, split, task_context):
"""split is a partition. This function is used in derived RDD
classes to add smarter behavior for specific cases."""
return split.x()
def partitions(self):
self._p, r = itertools.tee(self._p, 2)
return r
"""
Public API
----------
"""
def aggregate(self, zeroValue, seqOp, combOp):
"""
[distributed]
:param zeroValue:
The initial value to an aggregation, for example ``0`` or ``0.0``
for aggregating ``int`` s and ``float`` s, but any Python object is
possible. Can be ``None``.
:param seqOp:
A reference to a function that combines the current state with a
new value. In the first iteration, the current state is zeroValue.
:param combOp:
A reference to a function that combines outputs of seqOp.
In the first iteration, the current state is zeroValue.
:returns:
Output of ``combOp`` operations.
Example:
>>> from pysparkling import Context
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> Context().parallelize(
... [1, 2, 3, 4], 2
... ).aggregate((0, 0), seqOp, combOp)
(10, 4)
"""
return self.context.runJob(
self,
lambda tc, i: functools.reduce(
seqOp, i, copy.deepcopy(zeroValue)
),
resultHandler=lambda l: functools.reduce(
combOp, l, copy.deepcopy(zeroValue)
),
)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
[distributed]
:param zeroValue:
The initial value to an aggregation, for example ``0`` or ``0.0``
for aggregating ``int`` s and ``float`` s, but any Python object is
possible. Can be ``None``.
:param seqFunc:
A reference to a function that combines the current state with a
new value. In the first iteration, the current state is zeroValue.
:param combFunc:
A reference to a function that combines outputs of seqFunc.
In the first iteration, the current state is zeroValue.
:param numPartitions: (optional)
Not used.
:returns:
Output of ``combOp`` operations.
Example:
>>> from pysparkling import Context
>>> seqOp = (lambda x, y: x + y)
>>> combOp = (lambda x, y: x + y)
>>> r = Context().parallelize(
... [('a', 1), ('b', 2), ('a', 3), ('c', 4)]
... ).aggregateByKey(0, seqOp, combOp)
>>> (r['a'], r['b'])
(4, 2)
"""
def seqFuncByKey(tc, i):
r = defaultdict(lambda: copy.deepcopy(zeroValue))
for k, v in i:
r[k] = seqFunc(r[k], v)
return r
def combFuncByKey(l):
r = defaultdict(lambda: copy.deepcopy(zeroValue))
for p in l:
for k, v in p.items():
r[k] = combFunc(r[k], v)
return r
return self.context.runJob(self, seqFuncByKey,
resultHandler=combFuncByKey)
def cache(self):
"""
Whenever a partition is computed, cache the result.
Alias for :func:`RDD.persist`.
Example:
>>> from pysparkling import Context
>>> from pysparkling import CacheManager
>>>
>>> n_exec = 0
>>>
>>> def _map(e):
... global n_exec
... n_exec += 1
... return e*e
>>>
>>> my_rdd = Context().parallelize([1, 2, 3, 4], 2)
>>> my_rdd = my_rdd.map(_map).cache()
>>>
>>> logging.info('no exec until here')
>>> f = my_rdd.first()
>>> logging.info('available caches in {1}: {0}'.format(
... CacheManager.singleton().stored_idents(),
... CacheManager.singleton(),
... ))
>>>
>>> logging.info('executed map on first partition only so far')
>>> a = my_rdd.collect()
>>> logging.info('available caches in {1}: {0}'.format(
... CacheManager.singleton().stored_idents(),
... CacheManager.singleton(),
... ))
>>>
        >>> logging.info('now _map() was executed on all partitions and should '
... 'not be executed again')
>>> logging.info('available caches in {1}: {0}'.format(
... CacheManager.singleton().stored_idents(),
... CacheManager.singleton(),
... ))
>>> (my_rdd.collect(), n_exec)
([1, 4, 9, 16], 4)
"""
return self.persist()
def cartesian(self, other):
"""
:param other:
Another RDD.
:returns:
A new RDD with the cartesian product of this RDD with ``other``.
.. note::
This is currently implemented as a local operation requiring
all data to be pulled on one machine.
Example:
>>> from pysparkling import Context
>>> rdd = Context().parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
v1 = self.toLocalIterator()
v2 = self.collect()
return self.context.parallelize([(a, b) for a in v1 for b in v2])
def coalesce(self, numPartitions, shuffle=False):
"""
:param numPartitions:
Number of partitions in the resulting RDD.
:param shuffle: (optional)
Not used.
:returns:
A new RDD.
.. note::
This is currently implemented as a local operation requiring
all data to be pulled on one machine.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 3], 2).coalesce(1).getNumPartitions()
1
"""
return self.context.parallelize(self.toLocalIterator(), numPartitions)
def collect(self):
"""
:returns:
The entire dataset as a list.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 3]).collect()
[1, 2, 3]
"""
return self.context.runJob(
self, lambda tc, i: list(i),
resultHandler=lambda l: [x for p in l for x in p],
)
def count(self):
"""
:returns:
Number of entries in this dataset.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 3], 2).count()
3
"""
return self.context.runJob(self, lambda tc, i: sum(1 for _ in i),
resultHandler=sum)
def countApprox(self):
"""
Same as :func:`RDD.count()`.
"""
return self.count()
def countByKey(self):
"""
:returns:
A ``dict`` containing the count for every key.
Example:
>>> from pysparkling import Context
>>> Context().parallelize(
... [('a', 1), ('b', 2), ('b', 2)]
... ).countByKey()['b']
4
"""
def map_func(tc, x):
r = defaultdict(int)
for k, v in x:
r[k] += v
return r
return self.context.runJob(self, map_func,
resultHandler=utils.sum_counts_by_keys)
def countByValue(self):
"""
:returns:
A ``dict`` containing the count for every value.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 2, 4, 1]).countByValue()[2]
2
"""
def map_func(tc, x):
r = defaultdict(int)
for v in x:
r[v] += 1
return r
return self.context.runJob(self, map_func,
resultHandler=utils.sum_counts_by_keys)
def distinct(self, numPartitions=None):
"""
:param numPartitions:
The number of partitions of the newly created RDD.
:returns:
            A new RDD containing only distinct elements.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 2, 4, 1]).distinct().count()
3
"""
return self.context.parallelize(list(set(self.toLocalIterator())),
numPartitions)
def filter(self, f):
"""
:param f:
A reference to a function that if it evaluates to true when applied
to an element in the dataset, the element is kept.
:returns:
A new dataset.
Example:
>>> from pysparkling import Context
>>> Context().parallelize(
... [1, 2, 2, 4, 1, 3, 5, 9], 3,
... ).filter(lambda x: x % 2 == 0).collect()
[2, 2, 4]
"""
def map_func(tc, i, x):
return (xx for xx in x if f(xx))
return MapPartitionsRDD(self, map_func, preservesPartitioning=True)
def first(self):
"""
:returns:
The first element in the dataset.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 2, 4, 1, 3, 5, 9], 3).first()
1
Works also with empty partitions:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2], 20).first()
1
"""
return self.context.runJob(
self,
lambda tc, iterable: iterable,
allowLocal=True,
resultHandler=lambda l: next(itertools.chain.from_iterable(l)),
)
def flatMap(self, f, preservesPartitioning=True):
"""
A map operation followed by flattening.
:param f:
The map function.
:param preservesPartitioning: (optional)
Preserve the partitioning of the original RDD. Default True.
:returns:
A new RDD.
Example:
>>> from pysparkling import Context
>>> Context().parallelize(['hello', 'world']).flatMap(
... lambda x: [ord(ch) for ch in x]
... ).collect()
[104, 101, 108, 108, 111, 119, 111, 114, 108, 100]
"""
return MapPartitionsRDD(
self,
lambda tc, i, x: (e for xx in x for e in f(xx)),
preservesPartitioning=preservesPartitioning,
)
def flatMapValues(self, f):
"""
        A map operation on the values of (key, value) pairs followed by flattening.
:param f:
The map function.
:returns:
A new RDD.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([(1, 'hi'), (2, 'world')]).flatMapValues(
... lambda x: [ord(ch) for ch in x]
... ).collect()
[(1, 104), (1, 105), (2, 119), (2, 111), (2, 114), (2, 108), (2, 100)]
"""
return MapPartitionsRDD(
self,
lambda tc, i, x: ((xx[0], e) for xx in x for e in f(xx[1])),
preservesPartitioning=True,
)
def fold(self, zeroValue, op):
"""
:param zeroValue:
            The initial value, for example ``0`` or ``0.0``.
:param op:
The reduce operation.
:returns:
The folded (or aggregated) value.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([4, 7, 2])
>>> my_rdd.fold(0, lambda a, b: a+b)
13
"""
return self.aggregate(zeroValue, op, op)
def foldByKey(self, zeroValue, op):
"""
:param zeroValue:
            The initial value, for example ``0`` or ``0.0``.
:param op:
The reduce operation.
:returns:
The folded (or aggregated) value by key.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([('a', 4), ('b', 7), ('a', 2)])
>>> my_rdd.foldByKey(0, lambda a, b: a+b)['a']
6
"""
return self.aggregateByKey(zeroValue, op, op)
def foreach(self, f):
"""
Applies ``f`` to every element, but does not return a new RDD like
:func:`RDD.map()`.
:param f:
Apply a function to every element.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([1, 2, 3])
>>> a = []
>>> my_rdd.foreach(lambda x: a.append(x))
>>> len(a)
3
"""
self.context.runJob(self, lambda tc, x: [f(xx) for xx in x],
resultHandler=None)
def foreachPartition(self, f):
"""
Applies ``f`` to every partition, but does not return a new RDD like
:func:`RDD.mapPartitions()`.
:param f:
Apply a function to every partition.
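        Example (illustrative, in the style of :func:`RDD.foreach` above):
        >>> from pysparkling import Context
        >>> my_rdd = Context().parallelize([1, 2, 3], 2)
        >>> a = []
        >>> my_rdd.foreachPartition(lambda p: a.extend(p))
        >>> sorted(a)
        [1, 2, 3]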
"""
self.context.runJob(self, lambda tc, x: f(x),
resultHandler=None)
def getNumPartitions(self):
"""
:returns:
Returns the number of partitions.
"""
return sum(1 for _ in self.partitions())
def getPartitions(self):
"""
:returns:
The partitions of this RDD.
"""
return self.partitions()
def groupBy(self, f, numPartitions=None):
"""
:param f:
Function returning a key given an element of the dataset.
:param numPartitions:
The number of partitions in the new grouped dataset.
.. note::
Creating the new RDD is currently implemented as a local operation.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([4, 7, 2])
>>> my_rdd.groupBy(lambda x: x % 2).collect()
[(0, [2, 4]), (1, [7])]
"""
return self.context.parallelize((
(k, [gg[1] for gg in g]) for k, g in itertools.groupby(
sorted(self.keyBy(f).collect()),
lambda e: e[0],
)
), numPartitions)
def groupByKey(self, numPartitions=None):
"""
:param numPartitions:
The number of partitions in the new grouped dataset.
.. note::
Creating the new RDD is currently implemented as a local operation.
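        Example (illustrative; output follows the local implementation):
        >>> from pysparkling import Context
        >>> rdd = Context().parallelize([(0, 1), (1, 1), (1, 3)])
        >>> rdd.groupByKey().collect()
        [(0, [1]), (1, [1, 3])]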
"""
return self.context.parallelize((
(k, [gg[1] for gg in g]) for k, g in itertools.groupby(
sorted(self.collect()),
lambda e: e[0],
)
), numPartitions)
def histogram(self, buckets):
"""
:param buckets:
A list of bucket boundaries or an int for the number of buckets.
:returns:
A tuple (bucket_boundaries, histogram_values) where
bucket_boundaries is a list of length n+1 boundaries and
histogram_values is a list of length n with the values of each
bucket.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([0, 4, 7, 4, 10])
>>> b, h = my_rdd.histogram(10)
>>> h
        [1, 0, 0, 0, 2, 0, 0, 1, 0, 1]
"""
if isinstance(buckets, int):
num_buckets = buckets
stats = self.stats()
min_v = stats.min()
max_v = stats.max()
buckets = [min_v + float(i)*(max_v-min_v)/num_buckets
for i in range(num_buckets+1)]
        h = [0 for _ in buckets[:-1]]
for x in self.toLocalIterator():
for i, b in enumerate(zip(buckets[:-1], buckets[1:])):
if x >= b[0] and x < b[1]:
h[i] += 1
# make the last bin inclusive on the right
if x == buckets[-1]:
h[-1] += 1
return (buckets, h)
def id(self):
# not implemented yet
return None
def intersection(self, other):
"""
:param other:
The other dataset to do the intersection with.
:returns:
A new RDD containing the intersection of this and the other RDD.
.. note::
Creating the new RDD is currently implemented as a local operation.
Example:
>>> from pysparkling import Context
>>> rdd1 = Context().parallelize([0, 4, 7, 4, 10])
>>> rdd2 = Context().parallelize([3, 4, 7, 4, 5])
>>> rdd1.intersection(rdd2).collect()
[4, 7]
"""
return self.context.parallelize(
list(set(self.toLocalIterator()) & set(other.toLocalIterator()))
)
def isCheckpointed(self):
return False
def join(self, other, numPartitions=None):
"""
:param other:
The other RDD.
:param numPartitions:
Number of partitions to create in the new RDD.
:returns:
A new RDD containing the join.
.. note::
Creating the new RDD is currently implemented as a local operation.
Example:
>>> from pysparkling import Context
>>> rdd1 = Context().parallelize([(0, 1), (1, 1)])
>>> rdd2 = Context().parallelize([(2, 1), (1, 3)])
>>> rdd1.join(rdd2).collect()
[(1, (1, 3))]
"""
d1 = dict(self.collect())
d2 = dict(other.collect())
keys = set(d1.keys()) & set(d2.keys())
return self.context.parallelize((
(k, (d1[k], d2[k]))
for k in keys
), numPartitions)
def keyBy(self, f):
"""
:param f:
Function that returns a key from a dataset element.
:returns:
A new RDD containing the keyed data.
Example:
>>> from pysparkling import Context
>>> rdd = Context().parallelize([0, 4, 7, 4, 10])
>>> rdd.keyBy(lambda x: x % 2).collect()
[(0, 0), (0, 4), (1, 7), (0, 4), (0, 10)]
"""
return self.map(lambda e: (f(e), e))
def keys(self):
"""
:returns:
A new RDD containing the keys of the current RDD.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([(0, 1), (1, 1)]).keys().collect()
[0, 1]
"""
return self.map(lambda e: e[0])
def leftOuterJoin(self, other, numPartitions=None):
"""
:param other:
The other RDD.
:param numPartitions: (optional)
Number of partitions of the resulting RDD.
:returns:
A new RDD with the result of the join.
.. note::
Creating the new RDD is currently implemented as a local operation.
Example:
>>> from pysparkling import Context
>>> rdd1 = Context().parallelize([(0, 1), (1, 1)])
>>> rdd2 = Context().parallelize([(2, 1), (1, 3)])
>>> rdd1.leftOuterJoin(rdd2).collect()
[(0, (1, None)), (1, (1, 3))]
"""
d1 = dict(self.collect())
d2 = dict(other.collect())
return self.context.parallelize((
(k, (d1[k], d2[k] if k in d2 else None))
for k in d1.keys()
), numPartitions)
def lookup(self, key):
"""
Return all the (key, value) pairs where the given key matches.
:param key:
The key to lookup.
:returns:
A list of matched (key, value) pairs.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([(0, 1), (1, 1), (1, 3)]).lookup(1)
[1, 3]
"""
return self.context.runJob(
self,
lambda tc, x: (xx[1] for xx in x if xx[0] == key),
resultHandler=lambda l: [e for ll in l for e in ll],
)
def map(self, f):
"""
:param f:
Map function.
:returns:
A new RDD with mapped values.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 3]).map(lambda x: x+1).collect()
[2, 3, 4]
"""
return MapPartitionsRDD(
self,
lambda tc, i, x: (f(xx) for xx in x),
preservesPartitioning=True,
)
def mapPartitions(self, f, preservesPartitioning=False):
"""
:param f:
Map function.
:returns:
A new RDD with mapped partitions.
Example:
>>> from pysparkling import Context
>>> rdd = Context().parallelize([1, 2, 3, 4], 2)
>>> def f(iterator):
... yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
return MapPartitionsRDD(
self,
lambda tc, i, x: f(x),
preservesPartitioning=preservesPartitioning,
)
def mapValues(self, f):
"""
:param f:
Map function.
:returns:
A new RDD with mapped values.
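        Example (illustrative):
        >>> from pysparkling import Context
        >>> rdd = Context().parallelize([(0, [1, 2]), (1, [3])])
        >>> rdd.mapValues(sum).collect()
        [(0, 3), (1, 3)]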
"""
return MapPartitionsRDD(
self,
lambda tc, i, x: ((e[0], f(e[1])) for e in x),
preservesPartitioning=True,
)
def max(self):
"""
:returns:
The maximum element.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 3, 4, 3, 2], 2).max() == 4
True
"""
return self.stats().max()
def mean(self):
"""
:returns:
The mean of this dataset.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([0, 4, 7, 4, 10]).mean()
5.0
"""
return self.stats().mean()
def min(self):
"""
:returns:
The minimum element.
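        Example (illustrative, mirroring :func:`RDD.max` above):
        >>> from pysparkling import Context
        >>> Context().parallelize([4, 1, 2], 2).min() == 1
        True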
"""
return self.stats().min()
def name(self):
"""
:returns:
The name of the dataset.
"""
return self._name
def persist(self, storageLevel=None):
"""
Cache the results of computed partitions.
:param storageLevel:
Not used.
"""
return PersistedRDD(self, storageLevel=storageLevel)
def pipe(self, command, env={}):
"""
Run a command with the elements in the dataset as argument.
:param command:
Command line command to run.
:param env:
``dict`` of environment variables.
.. warning::
Unsafe for untrusted data.
Example:
>>> from pysparkling import Context
>>> piped = Context().parallelize(['0', 'hello', 'world']).pipe('echo')
>>> b'hello\\n' in piped.collect()
True
"""
return self.context.parallelize(subprocess.check_output(
[command]+x if isinstance(x, list) else [command, x]
) for x in self.collect())
def randomSplit(self, weights, seed=None):
"""
Split the RDD into a few RDDs according to the given weights.
.. note::
Creating the new RDDs is currently implemented as a local
operation.
:param weights:
Determines the relative lengths of the resulting RDDs.
:param seed:
Seed for random number generator.
:returns:
A list of RDDs.
Example:
>>> from pysparkling import Context
>>> rdd = Context().parallelize(range(500))
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], seed=42)
>>> (rdd1.count(), rdd2.count())
(199, 301)
"""
sum_weights = sum(weights)
boundaries = [0]
for w in weights:
boundaries.append(boundaries[-1] + w/sum_weights)
random.seed(seed)
lists = [[] for _ in weights]
for e in self.toLocalIterator():
r = random.random()
for i, lbub in enumerate(zip(boundaries[:-1], boundaries[1:])):
if r >= lbub[0] and r < lbub[1]:
lists[i].append(e)
return [self.context.parallelize(l) for l in lists]
def reduce(self, f):
"""
:param f:
A commutative and associative binary operator.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([0, 4, 7, 4, 10]).reduce(lambda a, b: a+b)
25
"""
return self.context.runJob(
self,
lambda tc, x: functools.reduce(f, x),
resultHandler=lambda x: functools.reduce(f, x),
)
def reduceByKey(self, f):
"""
:param f:
A commutative and associative binary operator.
.. note::
This operation includes a :func:`pysparkling.RDD.groupByKey()`
which is a local operation.
Example:
>>> from pysparkling import Context
>>> rdd = Context().parallelize([(0, 1), (1, 1), (1, 3)])
>>> rdd.reduceByKey(lambda a, b: a+b).collect()
[(0, 1), (1, 4)]
"""
return self.groupByKey().mapValues(lambda x: functools.reduce(f, x))
def repartition(self, numPartitions):
"""
:param numPartitions:
Number of partitions in new RDD.
:returns:
A new RDD.
.. note::
Creating the new RDD is currently implemented as a local operation.
"""
return self.context.parallelize(self.toLocalIterator(), numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
:param other:
The other RDD.
:param numPartitions: (optional)
Number of partitions of the resulting RDD.
:returns:
A new RDD with the result of the join.
.. note::
Creating the new RDD is currently implemented as a local operation.
Example:
>>> from pysparkling import Context
>>> rdd1 = Context().parallelize([(0, 1), (1, 1)])
>>> rdd2 = Context().parallelize([(2, 1), (1, 3)])
>>> sorted(rdd1.rightOuterJoin(rdd2).collect())
[(1, (1, 3)), (2, (None, 1))]
"""
d1 = dict(self.collect())
d2 = dict(other.collect())
return self.context.parallelize((
(k, (d1[k] if k in d1 else None, d2[k]))
for k in d2.keys()
), numPartitions)
def sample(self, withReplacement, fraction, seed=None):
"""
:param withReplacement:
Not used.
:param fraction:
Specifies the probability that an element is sampled.
:param seed: (optional)
Seed for random number generator.
:returns:
Sampled RDD.
Example:
>>> from pysparkling import Context
>>> rdd = Context().parallelize(range(100))
>>> sampled = rdd.sample(False, 0.1, seed=5)
>>> all(s1 == s2 for s1, s2 in zip(sampled.collect(),
... sampled.collect()))
True
"""
return PartitionwiseSampledRDD(
self, fraction,
preservesPartitioning=True,
seed=seed,
)
def sampleStdev(self):
"""
:returns:
sample standard deviation
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
:returns:
sample variance
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def saveAsPickleFile(self, path, batchSize=10):
"""
.. warning::
The output of this function is incompatible with the PySpark
output as there is no pure Python way to write Sequence files.
Example:
>>> from pysparkling import Context
>>> from tempfile import NamedTemporaryFile
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> d = ['hello', 'world', 1, 2]
>>> rdd = Context().parallelize(d).saveAsPickleFile(tmpFile.name)
>>> 'hello' in Context().pickleFile(tmpFile.name).collect()
True
"""
if File(path).exists():
raise FileAlreadyExistsException(
'Output {0} already exists.'.format(path)
)
codec_suffix = ''
if path.endswith(('.gz', '.bz2', '.lzo')):
codec_suffix = path[path.rfind('.'):]
def _map(path, obj):
stream = io.BytesIO()
            pickle.dump(obj, stream)
stream.seek(0)
File(path).dump(stream)
if self.getNumPartitions() == 1:
_map(path, self.collect())
return self
self.context.runJob(
self,
lambda tc, x: _map(
path+'/part-{0:05d}{1}'.format(tc.partitionId(), codec_suffix),
list(x),
),
resultHandler=lambda l: list(l),
)
TextFile(path+'/_SUCCESS').dump()
return self
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
        If the RDD has only one partition, the contents will be stored directly
        in the given path. If the RDD has more partitions, the data of the
        partitions is stored in individual files under ``path/part-00000`` and
        so on, and once all partitions are written, the file ``path/_SUCCESS``
        is written last.
:param path:
Destination of the text file.
:param compressionCodecClass:
Not used.
:returns:
``self``
"""
if TextFile(path).exists():
raise FileAlreadyExistsException(
'Output {0} already exists.'.format(path)
)
codec_suffix = ''
if path.endswith(('.gz', '.bz2', '.lzo')):
codec_suffix = path[path.rfind('.'):]
if self.getNumPartitions() == 1:
TextFile(
path
).dump(io.StringIO(''.join([
str(xx)+'\n' for xx in self.toLocalIterator()
])))
return self
self.context.runJob(
self,
lambda tc, x: TextFile(
path+'/part-{0:05d}{1}'.format(tc.partitionId(), codec_suffix)
).dump(io.StringIO(''.join([
str(xx)+'\n' for xx in x
]))),
resultHandler=lambda l: list(l),
)
TextFile(path+'/_SUCCESS').dump()
return self
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
:param keyfunc:
Returns the value that will be sorted.
:param ascending:
Default is True.
:param numPartitions:
Default is None. None means the output will have the same number of
partitions as the input.
:returns:
A new sorted RDD.
.. note::
Sorting is currently implemented as a local operation.
Examples:
>>> from pysparkling import Context
>>> rdd = Context().parallelize([5, 1, 2, 3])
>>> rdd.sortBy(lambda x: x).collect()
[1, 2, 3, 5]
>>> from pysparkling import Context
>>> rdd = Context().parallelize([1, 5, 2, 3])
>>> rdd.sortBy(lambda x: x, ascending=False).collect()
[5, 3, 2, 1]
"""
if numPartitions is None:
numPartitions = self.getNumPartitions()
return self.context.parallelize(
sorted(self.collect(), key=keyfunc, reverse=not ascending),
numPartitions,
)
def sortByKey(self, ascending=True, numPartitions=None,
keyfunc=lambda x: x[0]):
"""
:param ascending:
Default is True.
:param numPartitions:
Default is None. None means the output will have the same number of
partitions as the input.
:param keyfunc:
Returns the value that will be sorted.
:returns:
A new sorted RDD.
.. note::
Sorting is currently implemented as a local operation.
Examples:
>>> from pysparkling import Context
>>> rdd = Context().parallelize(
... [(5, 'a'), (1, 'b'), (2, 'c'), (3, 'd')]
... )
>>> rdd.sortByKey().collect()[0][1] == 'b'
True
>>> from pysparkling import Context
>>> rdd = Context().parallelize(
... [(1, 'b'), (5, 'a'), (2, 'c'), (3, 'd')]
... )
>>> rdd.sortByKey(ascending=False).collect()[0][1] == 'a'
True
"""
return self.sortBy(keyfunc, ascending, numPartitions)
def stats(self):
"""
:returns:
A :class:`pysparkling.StatCounter` instance.
Example:
>>> from pysparkling import Context
>>> d = [1, 4, 9, 16, 25, 36]
>>> s = Context().parallelize(d, 3).stats()
>>> sum(d)/len(d) == s.mean()
True
"""
return self.aggregate(
StatCounter(),
lambda a, b: a.merge(b),
lambda a, b: a.mergeStats(b),
)
def stdev(self):
"""
:returns:
standard deviation
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1.5, 2.5]).stdev()
0.5
"""
return self.stats().stdev()
def subtract(self, other, numPartitions=None):
"""
:param other:
The RDD to be subtracted from the current RDD.
:param numPartitions:
Currently not used. Partitions are preserved.
:returns:
New RDD.
Example:
>>> from pysparkling import Context
>>> rdd1 = Context().parallelize([(0, 1), (1, 1)])
>>> rdd2 = Context().parallelize([(1, 1), (1, 3)])
>>> rdd1.subtract(rdd2).collect()
[(0, 1)]
"""
list_other = other.collect()
return MapPartitionsRDD(
self,
lambda tc, i, x: (e for e in x if e not in list_other),
preservesPartitioning=True,
)
def sum(self):
"""
:returns:
The sum of all the elements.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([0, 4, 7, 4, 10]).sum()
25
"""
return self.context.runJob(self, lambda tc, x: sum(x),
resultHandler=sum)
def take(self, n):
"""
Only evaluates the partitions that are necessary to return n elements.
:param n:
Number of elements to return.
:returns:
Elements of the dataset in a list.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([4, 7, 2]).take(2)
[4, 7]
Another example where only the first two partitions are computed
(check the debug logs):
>>> from pysparkling import Context
>>> Context().parallelize([4, 7, 2], 3).take(2)
[4, 7]
"""
return self.context.runJob(
self,
lambda tc, i: i,
allowLocal=True,
resultHandler=lambda l: list(itertools.islice(
itertools.chain.from_iterable(l),
n,
)),
)
def takeSample(self, n):
"""
Assumes samples are evenly distributed between partitions.
Only evaluates the partitions that are necessary to return n elements.
:param n:
The number of elements to sample.
:returns:
Samples from the dataset.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([4, 7, 2]).takeSample(1)[0] in [4, 7, 2]
True
Another example where only one partition is computed
(check the debug logs):
>>> from pysparkling import Context
>>> d = [4, 9, 7, 3, 2, 5]
>>> Context().parallelize(d, 3).takeSample(1)[0] in d
True
"""
rnd_entries = sorted([random.random() for _ in range(n)])
num_partitions = sum(1 for _ in self.partitions())
rnd_entries = [
(
int(e*num_partitions), # partition number
e*num_partitions-int(e*num_partitions), # element in partition
)
for e in rnd_entries
]
partition_indices = [i for i, e in rnd_entries]
partitions = [p for i, p in enumerate(self.partitions())
if i in partition_indices]
def res_handler(l):
map_results = list(l)
entries = itertools.groupby(rnd_entries, lambda e: e[0])
r = []
for i, e_list in enumerate(entries):
p_result = map_results[i]
if not p_result:
continue
for p_num, e in e_list[1]:
e_num = int(e*len(p_result))
r.append(p_result[e_num])
return r
return self.context.runJob(
self, lambda tc, i: list(i), partitions=partitions,
resultHandler=res_handler,
)
def toLocalIterator(self):
"""
:returns:
An iterator over the dataset.
Example:
>>> from pysparkling import Context
>>> sum(Context().parallelize([4, 9, 7, 3, 2, 5], 3).toLocalIterator())
30
"""
return self.context.runJob(
self, lambda tc, i: list(i),
resultHandler=lambda l: (x for p in l for x in p),
)
def union(self, other):
"""
:param other:
The other RDD for the union.
:returns:
A new RDD.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([4, 9, 7, 3, 2, 5], 3)
>>> my_rdd.union(my_rdd).count()
12
"""
return self.context.union((self, other))
def values(self):
"""
:returns:
Values of a (key, value) dataset.
"""
return self.map(lambda e: e[1])
def variance(self):
"""
:returns:
The variance of the dataset.
Example:
>>> from pysparkling import Context
>>> Context().parallelize([1.5, 2.5]).variance()
0.25
"""
return self.stats().variance()
def zip(self, other):
"""
:param other:
Other dataset to zip with.
:returns:
New RDD with zipped entries.
.. note::
Creating the new RDD is currently implemented as a local operation.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([4, 9, 7, 3, 2, 5], 3)
>>> my_rdd.zip(my_rdd).collect()
[(4, 4), (9, 9), (7, 7), (3, 3), (2, 2), (5, 5)]
"""
return self.context.parallelize(
zip(self.toLocalIterator(), other.toLocalIterator())
)
def zipWithUniqueId(self):
"""
This is a fast operation.
:returns:
New RDD where every entry is zipped with a unique index.
Example:
>>> from pysparkling import Context
>>> my_rdd = Context().parallelize([423, 234, 986, 5, 345], 3)
>>> my_rdd.zipWithUniqueId().collect()
[(423, 0), (234, 1), (986, 4), (5, 2), (345, 5)]
"""
num_p = self.getNumPartitions()
return MapPartitionsRDD(
self,
lambda tc, i, x: (
(xx, e*num_p+tc.partition_id) for e, xx in enumerate(x)
),
preservesPartitioning=True,
)
class MapPartitionsRDD(RDD):
def __init__(self, prev, f, preservesPartitioning=False):
"""prev is the previous RDD.
f is a function with the signature
(task_context, partition index, iterator over elements).
"""
RDD.__init__(self, prev.partitions(), prev.context)
self.prev = prev
self.f = f
self.preservesPartitioning = preservesPartitioning
def compute(self, split, task_context):
return self.f(task_context, split.index,
self.prev.compute(split, task_context._create_child()))
def partitions(self):
return self.prev.partitions()
class PartitionwiseSampledRDD(RDD):
def __init__(self, prev, fraction, preservesPartitioning=False, seed=None):
"""prev is the previous RDD.
f is a function with the signature
(task_context, partition index, iterator over elements).
"""
RDD.__init__(self, prev.partitions(), prev.context)
if not seed:
seed = random.randint(0, sys.maxsize)
self.prev = prev
self.fraction = fraction
self.preservesPartitioning = preservesPartitioning
self.seed = seed
def compute(self, split, task_context):
random.seed(self.seed+split.index)
return (
x for x in self.prev.compute(split, task_context._create_child())
if random.random() < self.fraction
)
def partitions(self):
return self.prev.partitions()
class PersistedRDD(RDD):
def __init__(self, prev, storageLevel=None):
"""prev is the previous RDD.
"""
RDD.__init__(self, prev.partitions(), prev.context)
self.prev = prev
self.storageLevel = storageLevel
def compute(self, split, task_context):
if self._rdd_id is None or split.index is None:
cid = None
else:
cid = '{0}:{1}'.format(self._rdd_id, split.index)
cm = CacheManager.singleton()
if not cm.has(cid):
cm.add(
cid,
list(self.prev.compute(split, task_context._create_child())),
self.storageLevel
)
return iter(cm.get(cid))
| mit |
robjwells/adventofcode-solutions | 2015/python/2015-07.py | 1 | 3324 | #!/usr/bin/env python3
"""Advent of Code 2015, Day 7: Some Assembly Required"""
class Circuit:
"""A set of wires connected with bitwise logic gates"""
def __init__(self, instructions):
"""Parse instructions into a circuit layout
instructions should be the text from the puzzle input without
any processing.
The wire signals are not 'solved' at this stage.
"""
wires = [line.split(' -> ') for line in instructions.splitlines()]
self._wires = {w: s for s, w in wires}
def _solve(self, wire):
"""Return the signal provided to a wire
The signal is discovered by recursively solving the circuit,
according to the instructions provided in init.
"""
value = self._wires.get(wire, wire) # In case wire is an int
try:
number = int(value)
# Just assigning is fairly quick instead of checking whether
# the value in the dictionary is still a string, but don't
# add extra keys that are just ints referencing themselves
if wire != number:
self._wires[wire] = number
return number
except ValueError:
# Wire needs solving
pass
parts = value.split()
if len(parts) == 1:
result = self._solve(*parts) # Another wire
elif len(parts) == 2:
# "NOT": Invert 16-bit unsigned integer
result = 65535 - self._solve(parts[1])
elif len(parts) == 3:
left, op, right = parts
if op == 'AND':
result = self._solve(left) & self._solve(right)
elif op == 'OR':
result = self._solve(left) | self._solve(right)
elif op == 'LSHIFT':
result = self._solve(left) << int(right)
elif op == 'RSHIFT':
result = self._solve(left) >> int(right)
self._wires[wire] = result
return self._wires[wire]
def build(self):
"""Contruct the circuit so each wire has a signal"""
for wire in list(self._wires):
# list used to avoid 'dict changed size' error
if not isinstance(self._wires[wire], int):
self._solve(wire)
def __getitem__(self, key):
"""Allow indexing on wire identifier"""
return self._solve(key)
def __setitem__(self, key, value):
self._wires[key] = value
def test_circuit():
"""Test Circuit with some example instructions"""
instructions = '''\
123 -> x
456 -> y
x AND y -> d
x OR y -> e
x LSHIFT 2 -> f
y RSHIFT 2 -> g
NOT x -> h
NOT y -> i
'''
expected = dict([
('d', 72), ('e', 507), ('f', 492), ('g', 114),
('h', 65412), ('i', 65079), ('x', 123), ('y', 456)])
circuit = Circuit(instructions)
circuit.build() # Ensure each wire has a value
assert circuit._wires == expected
def main(puzzle_input):
first = Circuit(puzzle_input)
a_value = first['a']
print('Part one, signal on wire a:', a_value)
second = Circuit(puzzle_input)
second['b'] = a_value
print('Part two, signal on wire a after overriding b:', second['a'])
if __name__ == '__main__':
with open('../input/2015-07.txt') as f:
puzzle_input = f.read()
main(puzzle_input)
| mit |
Cinntax/home-assistant | tests/components/statistics/test_sensor.py | 3 | 13029 | """The test for the statistics sensor platform."""
import unittest
import statistics
import pytest
from homeassistant.setup import setup_component
from homeassistant.components.statistics.sensor import StatisticsSensor
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, TEMP_CELSIUS, STATE_UNKNOWN
from homeassistant.util import dt as dt_util
from tests.common import get_test_home_assistant
from unittest.mock import patch
from datetime import datetime, timedelta
from tests.common import init_recorder_component
from homeassistant.components import recorder
class TestStatisticsSensor(unittest.TestCase):
"""Test the Statistics sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.values = [17, 20, 15.2, 5, 3.8, 9.2, 6.7, 14, 6]
self.count = len(self.values)
self.min = min(self.values)
self.max = max(self.values)
self.total = sum(self.values)
self.mean = round(sum(self.values) / len(self.values), 2)
self.median = round(statistics.median(self.values), 2)
self.deviation = round(statistics.stdev(self.values), 2)
self.variance = round(statistics.variance(self.values), 2)
self.change = round(self.values[-1] - self.values[0], 2)
self.average_change = round(self.change / (len(self.values) - 1), 2)
self.change_rate = round(self.average_change / (60 * (self.count - 1)), 2)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_binary_sensor_source(self):
"""Test if source is a sensor."""
values = ["on", "off", "on", "off", "on", "off", "on"]
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "binary_sensor.test_monitored",
}
},
)
self.hass.start()
self.hass.block_till_done()
for value in values:
self.hass.states.set("binary_sensor.test_monitored", value)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert str(len(values)) == state.state
def test_sensor_source(self):
"""Test if source is a sensor."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
}
},
)
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert str(self.mean) == state.state
assert self.min == state.attributes.get("min_value")
assert self.max == state.attributes.get("max_value")
assert self.variance == state.attributes.get("variance")
assert self.median == state.attributes.get("median")
assert self.deviation == state.attributes.get("standard_deviation")
assert self.mean == state.attributes.get("mean")
assert self.count == state.attributes.get("count")
assert self.total == state.attributes.get("total")
assert "°C" == state.attributes.get("unit_of_measurement")
assert self.change == state.attributes.get("change")
assert self.average_change == state.attributes.get("average_change")
def test_sampling_size(self):
"""Test rotation."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 5,
}
},
)
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
assert 3.8 == state.attributes.get("min_value")
assert 14 == state.attributes.get("max_value")
def test_sampling_size_1(self):
"""Test validity of stats requiring only one sample."""
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 1,
}
},
)
self.hass.start()
self.hass.block_till_done()
for value in self.values[-3:]: # just the last 3 will do
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
state = self.hass.states.get("sensor.test")
# require only one data point
assert self.values[-1] == state.attributes.get("min_value")
assert self.values[-1] == state.attributes.get("max_value")
assert self.values[-1] == state.attributes.get("mean")
assert self.values[-1] == state.attributes.get("median")
assert self.values[-1] == state.attributes.get("total")
assert 0 == state.attributes.get("change")
assert 0 == state.attributes.get("average_change")
# require at least two data points
assert STATE_UNKNOWN == state.attributes.get("variance")
assert STATE_UNKNOWN == state.attributes.get("standard_deviation")
def test_max_age(self):
"""Test value deprecation."""
mock_data = {"return_time": datetime(2017, 8, 2, 12, 23, tzinfo=dt_util.UTC)}
def mock_now():
return mock_data["return_time"]
with patch(
"homeassistant.components.statistics.sensor.dt_util.utcnow", new=mock_now
):
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"max_age": {"minutes": 3},
}
},
)
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored",
value,
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
self.hass.block_till_done()
# insert the next value one minute later
mock_data["return_time"] += timedelta(minutes=1)
state = self.hass.states.get("sensor.test")
assert 6 == state.attributes.get("min_value")
assert 14 == state.attributes.get("max_value")
def test_change_rate(self):
"""Test min_age/max_age and change_rate."""
mock_data = {
"return_time": datetime(2017, 8, 2, 12, 23, 42, tzinfo=dt_util.UTC)
}
def mock_now():
return mock_data["return_time"]
with patch(
"homeassistant.components.statistics.sensor.dt_util.utcnow", new=mock_now
):
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
}
},
)
self.hass.start()
self.hass.block_till_done()
for value in self.values:
self.hass.states.set(
"sensor.test_monitored",
value,
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
self.hass.block_till_done()
# insert the next value one minute later
mock_data["return_time"] += timedelta(minutes=1)
state = self.hass.states.get("sensor.test")
assert datetime(
2017, 8, 2, 12, 23, 42, tzinfo=dt_util.UTC
) == state.attributes.get("min_age")
assert datetime(
2017, 8, 2, 12, 23 + self.count - 1, 42, tzinfo=dt_util.UTC
) == state.attributes.get("max_age")
assert self.change_rate == state.attributes.get("change_rate")
@pytest.mark.skip("Flaky in CI")
def test_initialize_from_database(self):
"""Test initializing the statistics from the database."""
# enable the recorder
init_recorder_component(self.hass)
# store some values
for value in self.values:
self.hass.states.set(
"sensor.test_monitored", value, {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS}
)
self.hass.block_till_done()
# wait for the recorder to really store the data
self.hass.data[recorder.DATA_INSTANCE].block_till_done()
# only now create the statistics component, so that it must read the
# data from the database
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 100,
}
},
)
self.hass.start()
self.hass.block_till_done()
# check if the result is as in test_sensor_source()
state = self.hass.states.get("sensor.test")
assert str(self.mean) == state.state
@pytest.mark.skip("Flaky in CI")
def test_initialize_from_database_with_maxage(self):
"""Test initializing the statistics from the database."""
mock_data = {
"return_time": datetime(2017, 8, 2, 12, 23, 42, tzinfo=dt_util.UTC)
}
def mock_now():
return mock_data["return_time"]
# Testing correct retrieval from recorder, thus we do not
# want purging to occur within the class itself.
def mock_purge(self):
return
# Set maximum age to 3 hours.
max_age = 3
# Determine what our minimum age should be based on test values.
expected_min_age = mock_data["return_time"] + timedelta(
hours=len(self.values) - max_age
)
# enable the recorder
init_recorder_component(self.hass)
with patch(
"homeassistant.components.statistics.sensor.dt_util.utcnow", new=mock_now
), patch.object(StatisticsSensor, "_purge_old", mock_purge):
# store some values
for value in self.values:
self.hass.states.set(
"sensor.test_monitored",
value,
{ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS},
)
self.hass.block_till_done()
# insert the next value 1 hour later
mock_data["return_time"] += timedelta(hours=1)
# wait for the recorder to really store the data
self.hass.data[recorder.DATA_INSTANCE].block_till_done()
# only now create the statistics component, so that it must read
# the data from the database
assert setup_component(
self.hass,
"sensor",
{
"sensor": {
"platform": "statistics",
"name": "test",
"entity_id": "sensor.test_monitored",
"sampling_size": 100,
"max_age": {"hours": max_age},
}
},
)
self.hass.block_till_done()
self.hass.start()
self.hass.block_till_done()
# check if the result is as in test_sensor_source()
state = self.hass.states.get("sensor.test")
assert expected_min_age == state.attributes.get("min_age")
# The max_age timestamp should be 1 hour before what we have right
# now in mock_data['return_time'].
assert mock_data["return_time"] == state.attributes.get("max_age") + timedelta(
hours=1
)
| apache-2.0 |
simbs/edx-platform | openedx/core/djangoapps/user_api/accounts/serializers.py | 26 | 11572 | """
Django REST Framework serializers for the User API Accounts sub-application
"""
from rest_framework import serializers
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.serializers import ReadOnlyFieldsSerializerMixin
from student.models import UserProfile, LanguageProficiency
from ..models import UserPreference
from .image_helpers import get_profile_image_urls_for_user
from . import (
ACCOUNT_VISIBILITY_PREF_KEY, ALL_USERS_VISIBILITY, PRIVATE_VISIBILITY,
)
PROFILE_IMAGE_KEY_PREFIX = 'image_url'
class LanguageProficiencySerializer(serializers.ModelSerializer):
"""
Class that serializes the LanguageProficiency model for account
information.
"""
class Meta(object):
model = LanguageProficiency
fields = ("code",)
def get_identity(self, data):
"""
This is used in bulk updates to determine the identity of an object.
The default is to use the id of an object, but we want to override that
and consider the language code to be the canonical identity of a
LanguageProficiency model.
"""
try:
return data.get('code', None)
except AttributeError:
return None
class UserReadOnlySerializer(serializers.Serializer):
"""
Class that serializes the User model and UserProfile model together.
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'configuration' arg up to the superclass
self.configuration = kwargs.pop('configuration', None)
if not self.configuration:
self.configuration = settings.ACCOUNT_VISIBILITY_CONFIGURATION
# Don't pass the 'custom_fields' arg up to the superclass
self.custom_fields = kwargs.pop('custom_fields', None)
super(UserReadOnlySerializer, self).__init__(*args, **kwargs)
def to_representation(self, user):
"""
Override to_representation to handle custom logic since we are serializing two models as one here
:param user: User object
:return: Dict serialized account
"""
profile = user.profile
data = {
"username": user.username,
"url": self.context.get('request').build_absolute_uri(
reverse('accounts_api', kwargs={'username': user.username})
),
"email": user.email,
# For backwards compatibility: Tables created after the upgrade to Django 1.8 will save microseconds.
# However, mobile apps are not expecting microsecond in the serialized value. If we set it to zero the
# DRF JSONEncoder will not include it in the serialized value.
# https://docs.djangoproject.com/en/1.8/ref/databases/#fractional-seconds-support-for-time-and-datetime-fields
"date_joined": user.date_joined.replace(microsecond=0),
"is_active": user.is_active,
"bio": AccountLegacyProfileSerializer.convert_empty_to_None(profile.bio),
"country": AccountLegacyProfileSerializer.convert_empty_to_None(profile.country.code),
"profile_image": AccountLegacyProfileSerializer.get_profile_image(
profile,
user,
self.context.get('request')
),
"time_zone": None,
"language_proficiencies": LanguageProficiencySerializer(
profile.language_proficiencies.all(),
many=True
).data,
"name": profile.name,
"gender": AccountLegacyProfileSerializer.convert_empty_to_None(profile.gender),
"goals": profile.goals,
"year_of_birth": profile.year_of_birth,
"level_of_education": AccountLegacyProfileSerializer.convert_empty_to_None(profile.level_of_education),
"mailing_address": profile.mailing_address,
"requires_parental_consent": profile.requires_parental_consent(),
"account_privacy": self._get_profile_visibility(profile, user),
}
return self._filter_fields(
self._visible_fields(profile, user),
data
)
def _visible_fields(self, user_profile, user):
"""
Return what fields should be visible based on user settings
:param user_profile: User profile object
:param user: User object
:return: whitelist of fields to be shown
"""
if self.custom_fields:
return self.custom_fields
profile_visibility = self._get_profile_visibility(user_profile, user)
if profile_visibility == ALL_USERS_VISIBILITY:
return self.configuration.get('shareable_fields')
else:
return self.configuration.get('public_fields')
def _get_profile_visibility(self, user_profile, user):
"""Returns the visibility level for the specified user profile."""
if user_profile.requires_parental_consent():
return PRIVATE_VISIBILITY
# Calling UserPreference directly because the requesting user may be different from existing_user
# (and does not have to be is_staff).
profile_privacy = UserPreference.get_value(user, ACCOUNT_VISIBILITY_PREF_KEY)
return profile_privacy if profile_privacy else self.configuration.get('default_visibility')
def _filter_fields(self, field_whitelist, serialized_account):
"""
Filter serialized account Dict to only include whitelisted keys
"""
visible_serialized_account = {}
for field_name in field_whitelist:
visible_serialized_account[field_name] = serialized_account.get(field_name, None)
return visible_serialized_account
class AccountUserSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of User model needed for account information.
"""
class Meta(object):
model = User
fields = ("username", "email", "date_joined", "is_active")
read_only_fields = ("username", "email", "date_joined", "is_active")
explicit_read_only_fields = ()
class AccountLegacyProfileSerializer(serializers.HyperlinkedModelSerializer, ReadOnlyFieldsSerializerMixin):
"""
Class that serializes the portion of UserProfile model needed for account information.
"""
profile_image = serializers.SerializerMethodField("_get_profile_image")
requires_parental_consent = serializers.SerializerMethodField()
language_proficiencies = LanguageProficiencySerializer(many=True, required=False)
class Meta(object):
model = UserProfile
fields = (
"name", "gender", "goals", "year_of_birth", "level_of_education", "country",
"mailing_address", "bio", "profile_image", "requires_parental_consent", "language_proficiencies"
)
# Currently no read-only field, but keep this so view code doesn't need to know.
read_only_fields = ()
explicit_read_only_fields = ("profile_image", "requires_parental_consent")
def validate_name(self, new_name):
""" Enforce minimum length for name. """
if len(new_name) < NAME_MIN_LENGTH:
raise serializers.ValidationError(
"The name field must be at least {} characters long.".format(NAME_MIN_LENGTH)
)
return new_name
def validate_language_proficiencies(self, value):
""" Enforce all languages are unique. """
language_proficiencies = [language for language in value]
unique_language_proficiencies = set(language["code"] for language in language_proficiencies)
if len(language_proficiencies) != len(unique_language_proficiencies):
raise serializers.ValidationError("The language_proficiencies field must consist of unique languages")
return value
def transform_gender(self, user_profile, value): # pylint: disable=unused-argument
""" Converts empty string to None, to indicate not set. Replaced by to_representation in version 3. """
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_country(self, user_profile, value): # pylint: disable=unused-argument
""" Converts empty string to None, to indicate not set. Replaced by to_representation in version 3. """
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_level_of_education(self, user_profile, value): # pylint: disable=unused-argument
""" Converts empty string to None, to indicate not set. Replaced by to_representation in version 3. """
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
def transform_bio(self, user_profile, value): # pylint: disable=unused-argument
""" Converts empty string to None, to indicate not set. Replaced by to_representation in version 3. """
return AccountLegacyProfileSerializer.convert_empty_to_None(value)
@staticmethod
def convert_empty_to_None(value):
""" Helper method to convert empty string to None (other values pass through). """
return None if value == "" else value
@staticmethod
def get_profile_image(user_profile, user, request=None):
""" Returns metadata about a user's profile image. """
data = {'has_image': user_profile.has_profile_image}
urls = get_profile_image_urls_for_user(user, request)
data.update({
'{image_key_prefix}_{size}'.format(image_key_prefix=PROFILE_IMAGE_KEY_PREFIX, size=size_display_name): url
for size_display_name, url in urls.items()
})
return data
def get_requires_parental_consent(self, user_profile):
""" Returns a boolean representing whether the user requires parental controls. """
return user_profile.requires_parental_consent()
def _get_profile_image(self, user_profile):
"""
Returns metadata about a user's profile image
This protected method delegates to the static 'get_profile_image' method
because 'serializers.SerializerMethodField("_get_profile_image")' will
call the method with a single argument, the user_profile object.
"""
return AccountLegacyProfileSerializer.get_profile_image(user_profile, user_profile.user)
def update(self, instance, validated_data):
"""
Update the profile, including nested fields.
"""
language_proficiencies = validated_data.pop("language_proficiencies", None)
# Update all fields on the user profile that are writeable,
# except for "language_proficiencies", which we'll update separately
update_fields = set(self.get_writeable_fields()) - set(["language_proficiencies"])
for field_name in update_fields:
default = getattr(instance, field_name)
field_value = validated_data.get(field_name, default)
setattr(instance, field_name, field_value)
instance.save()
# Now update the related language proficiency
if language_proficiencies is not None:
instance.language_proficiencies.all().delete()
instance.language_proficiencies.bulk_create([
LanguageProficiency(user_profile=instance, code=language["code"])
for language in language_proficiencies
])
return instance
| agpl-3.0 |
edmundgentle/schoolscript | SchoolScript/bin/Debug/pythonlib/Lib/filecmp.py | 3 | 9893 | """Utilities for comparing files and directories.
Classes:
dircmp
Functions:
cmp(f1, f2, shallow=True) -> int
cmpfiles(a, b, common) -> ([], [], [])
"""
import os
import stat
from itertools import filterfalse
__all__ = ["cmp", "dircmp", "cmpfiles"]
_cache = {}
BUFSIZE = 8*1024
def cmp(f1, f2, shallow=True):
"""Compare two files.
Arguments:
f1 -- First file name
f2 -- Second file name
shallow -- Just check stat signature (do not read the files).
defaults to True.
Return value:
True if the files are the same, False otherwise.
This function uses a cache for past comparisons and the results,
with a cache invalidation mechanism relying on stale signatures.
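Example (illustrative; the file paths are hypothetical):
>>> filecmp.cmp('a.txt', 'a_copy.txt') # doctest: +SKIP
True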
"""
s1 = _sig(os.stat(f1))
s2 = _sig(os.stat(f2))
if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG:
return False
if shallow and s1 == s2:
return True
if s1[1] != s2[1]:
return False
outcome = _cache.get((f1, f2, s1, s2))
if outcome is None:
outcome = _do_cmp(f1, f2)
if len(_cache) > 100: # limit the maximum size of the cache
_cache.clear()
_cache[f1, f2, s1, s2] = outcome
return outcome
def _sig(st):
return (stat.S_IFMT(st.st_mode),
st.st_size,
st.st_mtime)
def _do_cmp(f1, f2):
bufsize = BUFSIZE
with open(f1, 'rb') as fp1, open(f2, 'rb') as fp2:
while True:
b1 = fp1.read(bufsize)
b2 = fp2.read(bufsize)
if b1 != b2:
return False
if not b1:
return True
# Directory comparison class.
#
class dircmp:
"""A class that manages the comparison of 2 directories.
dircmp(a, b, ignore=None, hide=None)
A and B are directories.
IGNORE is a list of names to ignore,
defaults to ['RCS', 'CVS', 'tags'].
HIDE is a list of names to hide,
defaults to [os.curdir, os.pardir].
High level usage:
x = dircmp(dir1, dir2)
x.report() -> prints a report on the differences between dir1 and dir2
or
x.report_partial_closure() -> prints report on differences between dir1
and dir2, and reports on common immediate subdirectories.
x.report_full_closure() -> like report_partial_closure,
but fully recursive.
Attributes:
left_list, right_list: The files in dir1 and dir2,
filtered by hide and ignore.
common: a list of names in both dir1 and dir2.
left_only, right_only: names only in dir1, dir2.
common_dirs: subdirectories in both dir1 and dir2.
common_files: files in both dir1 and dir2.
common_funny: names in both dir1 and dir2 where the type differs between
dir1 and dir2, or the name is not stat-able.
same_files: list of identical files.
diff_files: list of filenames which differ.
funny_files: list of files which could not be compared.
subdirs: a dictionary of dircmp objects, keyed by names in common_dirs.
"""
def __init__(self, a, b, ignore=None, hide=None): # Initialize
self.left = a
self.right = b
if hide is None:
self.hide = [os.curdir, os.pardir] # Names never to be shown
else:
self.hide = hide
if ignore is None:
self.ignore = ['RCS', 'CVS', 'tags'] # Names ignored in comparison
else:
self.ignore = ignore
def phase0(self): # Compare everything except common subdirectories
self.left_list = _filter(os.listdir(self.left),
self.hide+self.ignore)
self.right_list = _filter(os.listdir(self.right),
self.hide+self.ignore)
self.left_list.sort()
self.right_list.sort()
def phase1(self): # Compute common names
a = dict(zip(map(os.path.normcase, self.left_list), self.left_list))
b = dict(zip(map(os.path.normcase, self.right_list), self.right_list))
self.common = list(map(a.__getitem__, filter(b.__contains__, a)))
self.left_only = list(map(a.__getitem__, filterfalse(b.__contains__, a)))
self.right_only = list(map(b.__getitem__, filterfalse(a.__contains__, b)))
def phase2(self): # Distinguish files, directories, funnies
self.common_dirs = []
self.common_files = []
self.common_funny = []
for x in self.common:
a_path = os.path.join(self.left, x)
b_path = os.path.join(self.right, x)
ok = 1
try:
a_stat = os.stat(a_path)
except os.error as why:
# print('Can\'t stat', a_path, ':', why.args[1])
ok = 0
try:
b_stat = os.stat(b_path)
except os.error as why:
# print('Can\'t stat', b_path, ':', why.args[1])
ok = 0
if ok:
a_type = stat.S_IFMT(a_stat.st_mode)
b_type = stat.S_IFMT(b_stat.st_mode)
if a_type != b_type:
self.common_funny.append(x)
elif stat.S_ISDIR(a_type):
self.common_dirs.append(x)
elif stat.S_ISREG(a_type):
self.common_files.append(x)
else:
self.common_funny.append(x)
else:
self.common_funny.append(x)
def phase3(self): # Find out differences between common files
xx = cmpfiles(self.left, self.right, self.common_files)
self.same_files, self.diff_files, self.funny_files = xx
def phase4(self): # Find out differences between common subdirectories
# A new dircmp object is created for each common subdirectory,
# these are stored in a dictionary indexed by filename.
# The hide and ignore properties are inherited from the parent
self.subdirs = {}
for x in self.common_dirs:
a_x = os.path.join(self.left, x)
b_x = os.path.join(self.right, x)
self.subdirs[x] = dircmp(a_x, b_x, self.ignore, self.hide)
def phase4_closure(self): # Recursively call phase4() on subdirectories
self.phase4()
for sd in self.subdirs.values():
sd.phase4_closure()
def report(self): # Print a report on the differences between a and b
# Output format is purposely lousy
print('diff', self.left, self.right)
if self.left_only:
self.left_only.sort()
print('Only in', self.left, ':', self.left_only)
if self.right_only:
self.right_only.sort()
print('Only in', self.right, ':', self.right_only)
if self.same_files:
self.same_files.sort()
print('Identical files :', self.same_files)
if self.diff_files:
self.diff_files.sort()
print('Differing files :', self.diff_files)
if self.funny_files:
self.funny_files.sort()
print('Trouble with common files :', self.funny_files)
if self.common_dirs:
self.common_dirs.sort()
print('Common subdirectories :', self.common_dirs)
if self.common_funny:
self.common_funny.sort()
print('Common funny cases :', self.common_funny)
def report_partial_closure(self): # Print reports on self and on subdirs
self.report()
for sd in self.subdirs.values():
print()
sd.report()
def report_full_closure(self): # Report on self and subdirs recursively
self.report()
for sd in self.subdirs.values():
print()
sd.report_full_closure()
methodmap = dict(subdirs=phase4,
same_files=phase3, diff_files=phase3, funny_files=phase3,
common_dirs = phase2, common_files=phase2, common_funny=phase2,
common=phase1, left_only=phase1, right_only=phase1,
left_list=phase0, right_list=phase0)
def __getattr__(self, attr):
if attr not in self.methodmap:
raise AttributeError(attr)
self.methodmap[attr](self)
return getattr(self, attr)
def cmpfiles(a, b, common, shallow=True):
"""Compare common files in two directories.
a, b -- directory names
common -- list of file names found in both directories
shallow -- if true, do comparison based solely on stat() information
Returns a tuple of three lists:
files that compare equal
files that are different
filenames that aren't regular files.
"""
res = ([], [], [])
for x in common:
ax = os.path.join(a, x)
bx = os.path.join(b, x)
res[_cmp(ax, bx, shallow)].append(x)
return res
# Compare two files.
# Return:
# 0 for equal
# 1 for different
# 2 for funny cases (can't stat, etc.)
#
def _cmp(a, b, sh, abs=abs, cmp=cmp):
try:
return not abs(cmp(a, b, sh))
except os.error:
return 2
# Return a copy with items that occur in skip removed.
#
def _filter(flist, skip):
return list(filterfalse(skip.__contains__, flist))
# Demonstration and testing.
#
def demo():
import sys
import getopt
options, args = getopt.getopt(sys.argv[1:], 'r')
if len(args) != 2:
raise getopt.GetoptError('need exactly two args', None)
dd = dircmp(args[0], args[1])
if ('-r', '') in options:
dd.report_full_closure()
else:
dd.report()
if __name__ == '__main__':
demo()
| gpl-2.0 |
charanpald/sandbox | sandbox/predictors/edge/EgoEdgeLabelPredictor.py | 1 | 4133 |
import logging
import gc
from apgl.predictors.AbstractPredictor import AbstractPredictor
from exp.sandbox.predictors.edge.AbstractEdgeLabelPredictor import AbstractEdgeLabelPredictor
from sandbox.util.Util import Util
from sandbox.util.Parameter import Parameter
from apgl.data import *
import numpy
class EgoEdgeLabelPredictor(AbstractEdgeLabelPredictor):
"""
A class which splits the graph into ego networks and then makes predictions
assuming that all ego networks are independent.
"""
def __init__(self, alterRegressor, egoRegressor):
"""
The alterRegressor must be a primal method, since the number of alters
for each ego vary, and hence the dual vectors are not constant in size.
"""
Parameter.checkClass(alterRegressor, AbstractPredictor)
Parameter.checkClass(egoRegressor, AbstractPredictor)
self.alterRegressor = alterRegressor
self.egoRegressor = egoRegressor
def learnModel(self, graph):
"""
Learn a prediction model based on considering ego networks as independent.
For each ego, X contains a list of neighbours and the corresponding labels
are the values of the edge labels. We then find the set of primal weights
w for each ego network and then regress onto the set of weights using the
ego labels.
:param graph: The input graph to learn from.
:type graph: class:`apgl.graph.AbstractSingleGraph`
"""
logging.info("Learning model on graph of size " + str(graph.getNumVertices()))
logging.info("EgoLearner: " + str(self.egoRegressor))
logging.info("AlterLearner: " + str(self.alterRegressor))
allIndices = numpy.arange(0, graph.getNumVertices())
V = graph.getVertexList().getVertices(list(allIndices))
W = numpy.zeros((0, graph.getVertexList().getNumFeatures()))
Xe = numpy.zeros((0, graph.getVertexList().getNumFeatures()))
printStep = numpy.floor(graph.getNumVertices()/10)
alterError = 0.0
for i in range(graph.getNumVertices()):
Util.printIteration(i, printStep, graph.getNumVertices())
neighbours = graph.neighbours(i)
if neighbours.shape[0] != 0:
X = V[neighbours, :]
y = numpy.ones(X.shape[0])
for j in range(neighbours.shape[0]):
y[j] = graph.getEdge(i, neighbours[j])
w = self.alterRegressor.learnModel(X, y)
#alterError = numpy.mean(numpy.abs(self.alterRegressor.predict(X) - y))
W = numpy.r_[W, numpy.array([w])]
Xe = numpy.r_[Xe, numpy.array([V[i, :]])]
#Now we need to solve least squares to find the regressor of Xe onto W
logging.info("Finding regression matrix onto weights using matrix of size " + str(Xe.shape))
gc.collect()
#self.standardiser = Standardiser()
#self.standardiser2 = Standardiser()
#Xe = self.standardiser.standardiseArray(Xe)
#W = self.standardiser2.standardiseArray(W)
self.egoRegressor.learnModel(Xe, W)
return W
def predictEdges(self, graph, edges):
"""
Make predictions for the given edges of the given graph.
:param edges: A numpy array consisting of the edges to make predictions over.
"""
Parameter.checkInt(graph.getVertexList().getNumFeatures(), 1, float('inf'))
logging.info("Making prediction over " + str(edges.shape[0]) + " edges")
predY = numpy.zeros(edges.shape[0])
for i in range(edges.shape[0]):
#Make a prediction for each ego-alter
egoInd = edges[i, 0]
alterInd = edges[i, 1]
ego = numpy.array([graph.getVertex(egoInd)])
#ego = self.standardiser.standardiseArray(ego)
c = self.egoRegressor.predict(ego)
#c = self.standardiser2.unstandardiseArray(c)
predY[i] = numpy.dot(graph.getVertex(alterInd), c.ravel())
return predY
#TODO: Write this
def classifyEdges(self, graph, edges):
pass
| gpl-3.0 |
logost/mbed | workspace_tools/host_tests/dev_null_auto.py | 122 | 1754 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class DevNullTest():
def check_readline(self, selftest, text):
""" Reads line from serial port and checks if text was part of read string
"""
result = False
c = selftest.mbed.serial_readline()
if c and text in c:
result = True
return result
def test(self, selftest):
result = True
# Test should print some text and later stop printing
# 'MBED: re-routing stdout to /null'
res = self.check_readline(selftest, "re-routing stdout to /null")
if not res:
# We haven't read preamble line
result = False
else:
# Check if there are printed characters
str = ''
for i in range(3):
c = selftest.mbed.serial_read(32)
if c is None:
return selftest.RESULT_IO_SERIAL
else:
str += c
if len(str) > 0:
result = False
break
selftest.notify("Received %d bytes: %s"% (len(str), str))
return selftest.RESULT_SUCCESS if result else selftest.RESULT_FAILURE
| apache-2.0 |
mhvk/astropy | astropy/coordinates/tests/utils.py | 5 | 1342 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from astropy import units as u
from astropy.utils import NumpyRNGContext
from astropy.utils.decorators import deprecated
# TODO: remove this function in v5.0. I think we can have a fairly fast
# deprecation cycle here because it is not meant to be public API.
@deprecated(since='v4.3',
message='This function has been deprecated in favor of the '
'public-facing utilities in '
'astropy.coordinates.angle_utilities',
alternative='Use uniform_spherical_random_surface() from '
'astropy.coordinates.angle_utilities instead.')
def randomly_sample_sphere(ntosample, randomseed=12345):
"""
Generates a set of spherical coordinates uniformly distributed over the
sphere in a way that gives the same answer for the same seed. Also
generates a random distance vector on [0, 1] (no units)
This simply returns (lon, lat, r) instead of a representation to avoid
failures due to the representation module.
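Example (sketch):
lon, lat, r = randomly_sample_sphere(100)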
"""
with NumpyRNGContext(randomseed):
lat = np.arcsin(np.random.rand(ntosample)*2-1)
lon = np.random.rand(ntosample)*np.pi*2
r = np.random.rand(ntosample)
return lon*u.rad, lat*u.rad, r
| bsd-3-clause |
Zhongqilong/kbengine | kbe/src/lib/python/Lib/test/test_codecmaps_jp.py | 60 | 1880 | #
# test_codecmaps_jp.py
# Codec mapping tests for Japanese encodings
#
from test import support
from test import multibytecodec_support
import unittest
class TestCP932Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'cp932'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/' \
'WINDOWS/CP932.TXT'
supmaps = [
(b'\x80', '\u0080'),
(b'\xa0', '\uf8f0'),
(b'\xfd', '\uf8f1'),
(b'\xfe', '\uf8f2'),
(b'\xff', '\uf8f3'),
]
for i in range(0xa1, 0xe0):
supmaps.append((bytes([i]), chr(i+0xfec0)))
class TestEUCJPCOMPATMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_jp'
mapfilename = 'EUC-JP.TXT'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JP.TXT'
class TestSJISCOMPATMap(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'shift_jis'
mapfilename = 'SHIFTJIS.TXT'
mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE' \
'/EASTASIA/JIS/SHIFTJIS.TXT'
pass_enctest = [
(b'\x81_', '\\'),
]
pass_dectest = [
(b'\\', '\xa5'),
(b'~', '\u203e'),
(b'\x81_', '\\'),
]
class TestEUCJISX0213Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'euc_jisx0213'
mapfilename = 'EUC-JISX0213.TXT'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-JISX0213.TXT'
class TestSJISX0213Map(multibytecodec_support.TestBase_Mapping,
unittest.TestCase):
encoding = 'shift_jisx0213'
mapfilename = 'SHIFT_JISX0213.TXT'
mapfileurl = 'http://people.freebsd.org/~perky/i18n/SHIFT_JISX0213.TXT'
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
cgranade/qutip | qutip/qip/algorithms/qft.py | 2 | 4734 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module provides the circuit implementation for Quantum Fourier Transform.
"""
__all__ = ['qft', 'qft_steps', 'qft_gate_sequence']
import numpy as np
import scipy.sparse as sp
from qutip.qobj import *
from qutip.qip.gates import snot, cphase, swap
from qutip.qip.circuit import QubitCircuit
def qft(N=1):
"""
Quantum Fourier Transform operator on N qubits.
Parameters
----------
N : int
Number of qubits.
Returns
-------
QFT: qobj
Quantum Fourier transform operator.
"""
if N < 1:
raise ValueError("Minimum value of N can be 1")
N2 = 2 ** N
phase = 2.0j * np.pi / N2
arr = np.arange(N2)
L, M = np.meshgrid(arr, arr)
L = phase * (L * M)
L = np.exp(L)
dims = [[2] * N, [2] * N]
return Qobj(1.0 / np.sqrt(N2) * L, dims=dims)
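# Example (illustrative): for one qubit the QFT reduces to the Hadamard
# gate, so qft(1) matches snot() up to numerical precision:
#   (qft(1) - snot()).norm() < 1e-12  # -> True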
def qft_steps(N=1, swapping=True):
"""
Quantum Fourier Transform operator on N qubits returning the individual
steps as unitary matrices operating from left to right.
Parameters
----------
N: int
Number of qubits.
swapping: boolean
Flag indicating sequence of swap gates to be applied at the end or not.
Returns
-------
U_step_list: list of qobj
List of Hadamard and controlled rotation gates implementing QFT.
"""
if N < 1:
raise ValueError("Minimum value of N can be 1")
U_step_list = []
if N == 1:
U_step_list.append(snot())
else:
for i in range(N):
for j in range(i):
U_step_list.append(cphase(np.pi / (2 ** (i - j)), N,
control=i, target=j))
U_step_list.append(snot(N, i))
if swapping is True:
for i in range(N // 2):
U_step_list.append(swap(N, [N - i - 1, i]))
return U_step_list
def qft_gate_sequence(N=1, swapping=True):
"""
Quantum Fourier Transform operator on N qubits returning the gate sequence.
Parameters
----------
N: int
Number of qubits.
swapping: boolean
Flag indicating sequence of swap gates to be applied at the end or not.
Returns
-------
qc: instance of QubitCircuit
Gate sequence of Hadamard and controlled rotation gates implementing
QFT.
"""
if N < 1:
raise ValueError("Minimum value of N can be 1")
qc = QubitCircuit(N)
if N == 1:
qc.add_gate("SNOT", targets=[0])
else:
for i in range(N):
for j in range(i):
qc.add_gate(r"CPHASE", targets=[j], controls=[i],
arg_label=r"{\pi/2^{%d}}" % (i - j),
arg_value=np.pi / (2 ** (i - j)))
qc.add_gate("SNOT", targets=[i])
if swapping is True:
for i in range(N // 2):
qc.add_gate(r"SWAP", targets=[i], controls=[N - 1 - i])
return qc
| bsd-3-clause |
Lynn-015/NJU_DMRG | dmrg/hgen.py | 1 | 4476 | import numpy as np
from scipy.sparse import kron,identity
from copy import copy,deepcopy
from ops import Z,Zs
from utils import index_map
class HGen(object):
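"""Block Hamiltonian generator for one side of a DMRG sweep.
Builds the block Hamiltonian `H` site by site, carries along the
partially applied interaction terms (`pterms`) that still cross the
block boundary, and tracks the symmetry sector of every basis state
in `basis_sector_array`/`basis_by_sector`.
"""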
def __init__(self,terms,L,d=2,part='left',fermi=False,sfermi=False,sectors=np.array([0.5,-0.5])):
self.l=1;self.d=d;self.D=self.d
self.H=np.zeros([self.d,self.d])
self.terms=terms;self.pterms=[]
self.L=L;self.part=part
self.single_site_sectors=sectors
self.basis_sector_array=copy(self.single_site_sectors)
self.basis_by_sector=index_map(self.basis_sector_array)
if fermi:
self.I=Z
elif sfermi:
self.I=Zs
else:
self.I=identity(self.d)
if self.part=='left':
for term in self.terms:
if len(term.ops)==1 and (term.ops[0].site is None or term.ops[0].site==1):
self.H+=term.ops[0].mat*term.param
elif len(term.ops)>1 and (term.ops[0].site is None or term.ops[0].site==1):
pterm=deepcopy(term)
pterm.ops[0].site=1
pterm.current_index=0
pterm.current_op=deepcopy(pterm.ops[0])
for i in range(len(pterm.dists)): # if sites are given, this step can be skipped
pterm.ops[i+1].site=pterm.dists[i]+pterm.ops[i].site
self.pterms.append(pterm)
else:
for term in self.terms:
if len(term.ops)==1 and (term.ops[-1].site is None or term.ops[-1].site==self.L):
self.H+=term.ops[-1].mat*term.param
elif len(term.ops)>1 and (term.ops[-1].site is None or term.ops[-1].site==self.L):
pterm=deepcopy(term)
pterm.ops[-1].site=self.L
pterm.current_index=len(term.ops)-1
pterm.current_op=deepcopy(pterm.ops[-1])
for i in range(len(pterm.dists)):
pterm.ops[-i-2].site=pterm.ops[-i-1].site-pterm.dists[-i-1]
self.pterms.append(pterm)
def enlarge(self):
self.l+=1
if self.part=='left':
self.H=kron(self.H,identity(self.d))
pts=[]
for pterm in self.pterms:
if pterm.ops[pterm.current_index+1].site==self.l:
pterm.current_index+=1
pterm.current_op.mat=kron(pterm.current_op.mat,pterm.ops[pterm.current_index].mat) #other attribute?
else:
pterm.current_op.mat=kron(pterm.current_op.mat,self.I)
if pterm.current_index<len(pterm.ops)-1:
pts.append(pterm)
else:
self.H+=pterm.current_op.mat*pterm.param
self.pterms=deepcopy(pts)
for term in self.terms:
if len(term.ops)==1 and (term.ops[0].site is None or term.ops[0].site==self.l):
self.H+=kron(identity(self.D),term.ops[0].mat)*term.param
elif len(term.ops)>1 and (term.ops[0].site is None or term.ops[0].site==self.l):
pterm=deepcopy(term)
pterm.current_index=0
pterm.current_op=deepcopy(pterm.ops[0])
pterm.current_op.mat=kron(identity(self.D),pterm.current_op.mat)
pterm.ops[0].site=self.l
for i in range(len(pterm.dists)):
pterm.ops[i+1].site=pterm.dists[i]+pterm.ops[i].site
self.pterms.append(pterm)
self.basis_sector_array=np.add.outer(self.basis_sector_array,self.single_site_sectors).flatten()
else:
self.H=kron(identity(self.d),self.H)
pts=[]
for pterm in self.pterms:
if pterm.ops[pterm.current_index-1].site==self.L-self.l+1:
pterm.current_index-=1
pterm.current_op.mat=kron(pterm.ops[pterm.current_index].mat,pterm.current_op.mat)
else:
pterm.current_op.mat=kron(self.I,pterm.current_op.mat)
if pterm.current_index>0:
pts.append(pterm)
else:
self.H+=pterm.current_op.mat*pterm.param
self.pterms=deepcopy(pts)
for term in self.terms:
if len(term.ops)==1 and (term.ops[-1].site is None or term.ops[-1].site==self.L-self.l+1):
self.H+=kron(term.ops[-1].mat,identity(self.D))*term.param
elif len(term.ops)>1 and (term.ops[-1].site is None or term.ops[-1].site==self.L-self.l+1):
pterm=deepcopy(term)
pterm.current_index=len(pterm.ops)-1
pterm.current_op=deepcopy(pterm.ops[-1])
pterm.current_op.mat=kron(pterm.current_op.mat,identity(self.D))
pterm.ops[-1].site=self.L-self.l+1
for i in range(len(pterm.dists)):
pterm.ops[-i-2].site=pterm.ops[-i-1].site-pterm.dists[-i-1]
self.pterms.append(pterm)
self.basis_sector_array=np.add.outer(self.single_site_sectors,self.basis_sector_array).flatten()
self.basis_by_sector=index_map(self.basis_sector_array)
self.D*=self.d
def transform(self,T):
self.H=T.conjugate().transpose().dot(self.H.dot(T))
for pterm in self.pterms:
pterm.current_op.mat=T.conjugate().transpose().dot(pterm.current_op.mat.dot(T))
self.D=self.H.shape[0]
| mit |
tinfoilboy/Tiger | Tiger/shader_build.py | 1 | 2471 | import json
import os
import textwrap
import re
import sys
BASE_DIR = os.path.dirname(__file__)
# Global defines for the file paths
shaders_path = "shaders/"
shader_builder_class_path = "FileFragments/Shader/ShaderBuilderClass.part"
shader_method_path = "FileFragments/Shader/ShaderMethod.part"
shader_builder_header_file_path = 'OGL/ShaderBuilder.hpp'
# Global variables for the part files of the ShaderBuilder
shader_method_string = open(os.path.join(BASE_DIR, shader_method_path), 'r', newline='\n').read()
regex_space = re.compile(r'\t+|\r+|\v+')
indent_level = ' '
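# Expected shaders.json layout (illustrative sketch; only the fields
# read below -- "name" and "file_name" -- are assumed):
# {
#     "shaders": [
#         { "name": "Default", "file_name": "default" }
#     ]
# }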
def build_shader_method(obj):
# The method string, use this so that we can safely replace
method_str = shader_method_string
vertex_shader_source = open(os.path.join(BASE_DIR, shaders_path + obj["file_name"] + ".vert"), 'r', newline='').read()
fragment_shader_source = open(os.path.join(BASE_DIR, shaders_path + obj["file_name"] + ".frag"), 'r', newline='').read()
vertex_shader_source = re.sub(regex_space, '', vertex_shader_source)
fragment_shader_source = re.sub(regex_space, '', fragment_shader_source)
vertex_shader_source = vertex_shader_source.replace('\n', '\\n')
fragment_shader_source = fragment_shader_source.replace('\n', '\\n')
method_str = method_str.replace("~shadername~", obj["name"])
method_str = method_str.replace("~vertsrc~", vertex_shader_source)
method_str = method_str.replace("~fragsrc~", fragment_shader_source)
indented_str = ''
for line in method_str.splitlines():
indented_str += indent_level + line + '\n'
return indented_str
def read_shaders_file():
# Load the shaders file data from the shaders.json file
with open(os.path.join(BASE_DIR, shaders_path + "shaders.json"), encoding='utf-8') as shaders_file:
shaders_data = json.load(shaders_file)
# The variable that stores the ShaderMethods built
shader_methods = ''
# Iterate through the Shader definitions, and generate a method for them
for obj in shaders_data["shaders"]:
shader_methods += build_shader_method(obj) + '\n'
shader_methods = shader_methods.rstrip()
shader_builder_class_string = open(os.path.join(BASE_DIR, shader_builder_class_path), 'r', newline='\n').read()
shader_builder_class_string = shader_builder_class_string.replace("~methods~", shader_methods)
with open(os.path.join(BASE_DIR, shader_builder_header_file_path), "w+") as builder_header:
builder_header.write(shader_builder_class_string)
builder_header.close()
read_shaders_file() | mit |
c-PRIMED/puq | puq/sweep.py | 1 | 13705 | """
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
import time, os, re, h5py, sys, string
import numpy as np
from puq.testprogram import TestProgram
from numpy import ndarray
from puq.hdf import get_output_names
from logging import debug
from puq.util import vprint
from puq.options import options
from puq.jpickle import pickle, unpickle
from socket import gethostname
from puq.parameter import get_psamples
import getpass
from puq.calibrate import calibrate
# for python3
if sys.version[0] == "3":
raw_input = input
py3 = True
else:
py3 = False
_vcache = {}
_dcache = {}
class Sweep(object):
"""
Creates an object that contains all the information about
a parameter sweep.
Args:
psweep: Parameter Sweep object. See :class:`PSweep`.
host: Host object. See :class:`Host`.
prog: TestProgram object: See :class:`TestProgram`.
      caldata(array): Experimental data for calibration. Optional.
calerr(float): Measurement error in the experimental data.
description(string): Optional description of this run.
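
    Example (a sketch; ``psweep``, ``host`` and ``prog`` stand for
    already-constructed PSweep, Host and TestProgram instances):

    >>> sweep = Sweep(psweep, host, prog, description='demo run')
    >>> sweep.run('demo', overwrite=True)  # doctest: +SKIP
    >>> sweep.analyze()                    # doctest: +SKIP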
"""
def __init__(self, psweep, host, prog, caldata=None, calerr=None, description=''):
self.host = host
self._reinit = False
if isinstance(prog, TestProgram):
self.prog = prog
else:
self.prog = TestProgram(prog)
if description == '':
description = self.prog.desc
self.description = description
# optional calibration data
self.caldata = caldata
self.err = calerr
# trying to get 10Hz resolution, 1 year clock
secperyear = 365*24*60*60
self.fname = 'sweep_%s' % int((time.time() % secperyear) * 10)
self.psweep = psweep
self.host.prog = self.prog
self.input_script = os.path.abspath(sys.argv[0])
def _save_hdf5(self):
debug('')
h5 = h5py.File(self.fname + '.hdf5')
# write HDF5 header information, once only
if 'version' not in h5.attrs:
h5.attrs['MEMOSA_UQ'] = b'MEMOSA'
h5.attrs['version'] = 201
# h5.attrs['id'] = self.id
h5.attrs['date'] = time.strftime("%b %d %H:%M %Z %Y", time.localtime())
h5.attrs['hostname'] = gethostname()
h5.attrs['username'] = getpass.getuser()
h5.attrs['UQtype'] = self.psweep.__class__.__name__.lower()
h5.attrs['description'] = self.description
# overwrite previous
if 'input' in h5:
del h5['input']
if 'private' in h5:
del h5['private']
hp = h5.require_group('private')
hp['sweep'] = pickle(self)
# in /input write the input params in json and regular arrays
h = h5.require_group('input')
# basic parameter table for non-python reading of the hdf5 file
h['param_array'] = np.column_stack([p.values for p in self.psweep.params])
if py3:
h['param_array'].attrs['name'] = [bytes(p.name, 'UTF-8') for p in self.psweep.params]
h['param_array'].attrs['description'] = [bytes(p.description, 'UTF-8') for p in self.psweep.params]
else:
h['param_array'].attrs['name'] = [str(p.name) for p in self.psweep.params]
h['param_array'].attrs['description'] = [str(p.description) for p in self.psweep.params]
# json-pickled parameters
h = h.require_group('params')
for p in self.psweep.params:
h[p.name] = pickle(p)
h[p.name].attrs['description'] = p.description
h[p.name].attrs['label'] = p.label
if hasattr(self.psweep, 'kde'):
h5['input/kde'] = pickle(self.psweep.kde)
# input script
if hasattr(self, 'input_script'):
h5['input/scriptname'] = str(self.input_script)
try:
h5['input/script'] = open(self.input_script).read()
except:
h5['input/script'] = "Source was unavailable."
h5.close()
def _save_and_run(self):
self._save_hdf5()
res = self.host.run()
if res:
self._save_hdf5()
return res
def run(self, fn=None, overwrite=False):
"""
Calls PSweep.run() to run all the jobs in the Sweep. Collect the data
from the outputs and call the PSweep analyze method. If the PSweep method
has an iterative callback defined, call it, otherwise return.
Args:
fn(string): HDF5 filename for output. '.hdf5' will be
appended to the filename if necessary. If fn is None,
a filename will be generated starting with "sweep\_"
followed by a timestamp.
overwrite(boolean): If True and fn is not None, will
silently overwrite any previous files of the same name.
Returns:
True on success.
"""
if fn is not None:
self.fname = os.path.splitext(fn)[0]
fn = self.fname + '.hdf5'
if os.path.exists(fn):
if not overwrite:
done = False
while 1:
ans = raw_input('%s already exists. Replace (Y/N):' % fn)
try:
if ans.upper() == 'N':
done = True
break
elif ans.upper() == 'Y':
break
except:
pass
print("Please answer with 'Y' or 'N'\n")
if done:
sys.exit(-1)
os.remove(fn)
vprint(1, 'Saving run to %s.hdf5' % self.fname)
return self.psweep.run(self)
def extend(self, num=None):
return self.psweep.extend(num)
def collect_data(self, hf=None):
""" Collects data from captured stdout files and puts it in arrays
in 'output/data'. Returns True on success.
"""
need_to_close = False
if hf is None:
hf = h5py.File(self.fname + '.hdf5')
need_to_close = True
finished_jobs = self.host.collect(hf)
self._extract_hdf5(hf, finished_jobs)
has_data = 'data' in hf['output']
if has_data:
outd = hf['output/data']
data = dict([(x, outd[x].value) for x in outd])
params = dict([(p.name, p.values) for p in self.psweep.params])
if need_to_close:
hf.close()
if not has_data and not self._reinit:
print("WARNING: There is no data in the output section!")
print("Check that your runs completed successfully.")
return False
return params, data
def analyze_errors(self, hf):
        p = re.compile(r'Command exited with non-zero status \d+')
for job in hf['output/jobs']:
if job == 'time':
continue
err = hf['output/jobs/%s/stderr' % job].value
res = p.findall(err)
if res:
print("Job %s: %s" % (job, res[0]))
for line in err.split('\n'):
if line != res[0] and not line.startswith('HDF5:{'):
print(line)
elif len(err) == 0:
print("Job %s never completed. Walltime exceeded?" % job)
results = False
out = hf['output/jobs/%s/stdout' % job].value
for line in out.split('\n'):
if line.startswith('HDF5:{'):
results = True
break
if not results:
print("ERROR: Job %s has no output data in stdout." % job)
def analyze(self, verbose=False):
"""
Collects the output from all the jobs into an HDF5 file.
Parses any tagged data in the output and puts it in
the /data group in the HDF5 file.
"""
debug('')
hf = h5py.File(self.fname + '.hdf5')
if not self.host.status(quiet=1)[1]:
print("Cannot collect data or perform analysis until all jobs are completed.")
print("You should do 'puq resume' to resume jobs.")
sys.exit(-1)
# collect the data if it has not already been collected.
has_data = 'output' in hf and 'data' in hf['output']
if not has_data:
self.collect_data(hf)
        errors = 0
        try:
            self.psweep.analyze(hf)
        except:
            print('Warning: analysis failed.')
            errors = 1
# quick error check
if 'data' in hf['output']:
errors = 0
try:
options[self.psweep.__class__.__name__]['verbose'] = verbose
except KeyError:
options[self.psweep.__class__.__name__] = {'verbose': verbose}
for var in hf['output/data']:
if not isinstance(hf['output/data/%s' % var], h5py.Group):
tlen = len(hf['output/data/%s' % var].value)
num_jobs = len(hf['output/jobs'])
if 'time' in hf['output/jobs']:
num_jobs -= 1
if tlen != num_jobs:
errors += 1
print("Expected %s data points for variable %s, but got %s." % (num_jobs, var, tlen))
self.analyze_errors(hf)
return errors
if 'psamples' not in hf:
s = get_psamples(self.psweep.params)
if s is not None:
hf['psamples'] = s
# FIXME check for correlation if multiple outputs
# calibrate
if hasattr(self, 'caldata') and self.caldata is not None:
self._calibrate(hf)
hf.close()
self._save_hdf5()
return errors
# Bayesian Calibration
def _calibrate(self, hf):
ovar = get_output_names(hf)[0]
method = hf.attrs['UQtype']
rs = unpickle(hf["/%s/%s/response" % (method, ovar)].value)
# print "Calling calibrate from sweep"
self.psweep.params, self.psweep.kde = calibrate(self.psweep.params, self.caldata, self.err, rs.eval)
def _dump_hdf5_cache(self, hf, d):
global _vcache, _dcache
if len(_vcache):
if d:
dgrp = hf.require_group('output/data')
else:
dgrp = hf.require_group('output/jobs')
for n in _vcache:
if n in dgrp:
del dgrp[n]
adata = _vcache[n]
if d and len(adata.shape) > 1:
# Data is a multidimensional array and we want to do analysis
# on each array element individually. So we write them
# individually to /output/data
numvals = np.prod(adata.shape[1:])
for i, index in enumerate(np.ndindex(adata.shape[1:])):
name = '%s%s' % (n, [ind for ind in index])
data = adata.flatten()[i::numvals]
ds = dgrp.create_dataset(name, data=data)
ds.attrs["description"] = _dcache[n]
else:
ds = dgrp.create_dataset(n, data=adata)
ds.attrs["description"] = str(_dcache[n])
_vcache = {}
_dcache = {}
def _dump_hdf5(self, grp, line, job, mjob):
debug("Dump %s : %s" % (job, line))
global _vcache, _dcache
# old format used single quotes.
if line.startswith("{'"):
line = line.replace("'", '"')
x = unpickle(line)
v = x['value']
n = x['name']
if n not in _vcache:
if isinstance(v, ndarray):
_vcache[n] = np.empty([mjob] + list(v.shape))
else:
_vcache[n] = np.empty((mjob))
_vcache[n].fill(np.nan)
_dcache[n] = x['desc']
_vcache[n][job] = v
# Extract tagged data to hdf5
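    # A tagged line in a job's stdout looks roughly like
    #   HDF5:{"name": "v", "desc": "velocity", "value": 1.25}:5FDH
    # where the payload is a json-pickled dict; long payloads may wrap, and
    # the ':5FDH' sentinel marks the end of a continued line (illustrative
    # example, reconstructed from the parsing logic below).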
def _extract_hdf5(self, hf, jobs):
debug("Extract")
mjob = np.max(jobs) + 1
run_grp = hf.require_group('output/jobs')
for ext in ['out', 'err']:
for j in jobs:
grp = run_grp.require_group(str(j))
if not 'std%s' % ext in grp:
continue
f = grp['std%s' % ext].value
cont = False
for line in f.splitlines():
if cont:
line = line.strip()
cline += line
if line.endswith(':5FDH'):
cont = False
cline = cline[:-5]
self._dump_hdf5(grp, cline, j, mjob)
elif line.startswith('HDF5:'):
line = line[5:].strip()
if line.endswith(':5FDH'):
line = line[:-5]
self._dump_hdf5(grp, line, j, mjob)
else:
cont = True
cline = line
elif ext == 'err':
print('STDERR[job %d]: %s' % (j, line))
self._dump_hdf5_cache(hf, ext == 'out')
def resume(self):
if hasattr(self.host, 'jobs'):
self.host.run()
self._save_hdf5()
self.analyze()
else:
print("All jobs finished.")
| mit |
dpshelio/sunpy | sunpy/map/sources/iris.py | 2 | 2106 | import numpy as np
from sunpy.map import GenericMap
__all__ = ['SJIMap']
class SJIMap(GenericMap):
"""
A 2D IRIS Slit Jaw Imager Map.
The Interface Region Imaging Spectrograph (IRIS) small explorer spacecraft
provides simultaneous spectra and images of the photosphere, chromosphere,
transition region, and corona with 0.33 to 0.4 arcsec spatial resolution,
2-second temporal resolution and 1 km/s velocity resolution over a
field-of- view of up to 175 arcsec by 175 arcsec. IRIS consists of a 19-cm
UV telescope that feeds a slit-based dual-bandpass imaging spectrograph.
Slit-jaw images in four different passbands (C ii 1330, Si iv 1400,
Mg ii k 2796 and Mg ii wing 2830 A) can be taken simultaneously with
spectral rasters that sample regions up to 130 arcsec by 175 arcsec at a
variety of spatial samplings (from 0.33 arcsec and up).
IRIS is sensitive to emission from plasma at temperatures between
5000 K and 10 MK.
IRIS was launched into a Sun-synchronous orbit on 27 June 2013.
.. warning::
This object can only handle level 1 SJI files.
References
----------
* `IRIS Mission Page <https://iris.lmsal.com>`_
* `IRIS Analysis Guide <https://iris.lmsal.com/itn26/itn26.pdf>`_
* `IRIS Instrument Paper <https://doi.org/10.1007/s11207-014-0485-y>`_
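
    Examples
    --------
    A sketch of loading one through the map factory (the filename is
    illustrative; sunpy dispatches to this class automatically for
    matching files):

    >>> import sunpy.map
    >>> sji = sunpy.map.Map('iris_l2_sji_1400.fits')  # doctest: +SKIP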
"""
def __init__(self, data, header, **kwargs):
        # Assume pixel units are arcsec if not given
header['cunit1'] = header.get('cunit1', 'arcsec')
header['cunit2'] = header.get('cunit2', 'arcsec')
GenericMap.__init__(self, data, header, **kwargs)
self.meta['detector'] = "SJI"
self.meta['waveunit'] = "Angstrom"
self.meta['wavelnth'] = header['twave1']
@classmethod
def is_datasource_for(cls, data, header, **kwargs):
"""Determines if header corresponds to an IRIS SJI image"""
tele = header.get('TELESCOP', '').startswith('IRIS')
obs = header.get('INSTRUME', '').startswith('SJI')
        level = header.get('lvl_num') == 1  # computed but not enforced below
        return tele and obs
| bsd-2-clause |
Smile-SA/odoo_addons | smile_access_control/tests/test_users.py | 1 | 3193 | # -*- coding: utf-8 -*-
# (C) 2011 Smile (<http://www.smile.fr>)
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl).
from odoo.tests.common import TransactionCase
from odoo.exceptions import ValidationError
class TestUsers(TransactionCase):
def setUp(self):
super(TestUsers, self).setUp()
Users = self.env['res.users']
Groups = self.env['res.groups']
# Create groups
self.group1, self.group2 = map(
lambda index: Groups.create(
{'name': 'Group %d' % index}), range(1, 3))
# Create user profiles
self.user_profile1 = Users.create({
'name': 'Profile 1',
'login': 'profile1',
'is_user_profile': True,
'groups_id': [(4, self.group1.id)],
})
self.user_profile2 = Users.create({
'name': 'Profile 2',
'login': 'profile2',
'is_user_profile': True,
'groups_id': [(6, 0, (self.group1 | self.group2).ids)],
})
# Create users
self.user = Users.create({
'name': 'Demo User',
'login': 'demouser',
'user_profile_id': self.user_profile1.id,
})
def test_create(self):
"""
Test create method
We create a dictionary of values
        We create a user from these values, who has a user profile
        We check that the new user has been created with the right name
"""
userValue = {'name': 'User Test 1',
'login': 'usertest1',
'user_profile_id': self.user_profile2.id,
}
Users = self.env['res.users']
user_test = Users.create(userValue)
newUser = self.env['res.users'].browse(user_test.id)
self.assertEqual(userValue['name'], newUser['name'])
def test_write(self):
"""
Test write method
We use the user created in the first method
We change his user_profile_id
We check if the update has been done
"""
userEdited = self.env['res.users'].browse(
self.user.id).write({'user_profile_id': self.user_profile2.id})
self.assertEqual(userEdited, True)
def test_check_user_profile_id(self):
"""
Test _check_user_profile_id method
We try to create a user with admin as user profile
It raises a Validation Error
"""
userValue = {'name': 'User Test 1',
'login': 'usertest1',
'user_profile_id': self.env.ref('base.user_root').id,
}
with self.assertRaises(ValidationError):
self.env['res.users'].create(userValue)
def test_onchange_user_profile(self):
"""
Test onchange user profile method
We try to set the profile of an existing user to admin
It raises a Validation Error
"""
admin = self.env.ref('base.user_root').id
with self.assertRaises(ValidationError):
self.env['res.users'].browse(
self.user.id).write({'user_profile_id': admin})
| agpl-3.0 |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/contactpoint.py | 1 | 2156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ContactPoint) on 2019-05-07.
# 2019, SMART Health IT.
from . import element
class ContactPoint(element.Element):
""" Details of a Technology mediated contact point (phone, fax, email, etc.).
Details for all kinds of technology mediated contact points for a person or
organization, including telephone, email, etc.
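
    Example (a sketch; the field values are illustrative):

        cp = ContactPoint({'system': 'phone', 'value': '555-0100',
                           'use': 'work', 'rank': 1})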
"""
resource_type = "ContactPoint"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.period = None
""" Time period when the contact point was/is in use.
Type `Period` (represented as `dict` in JSON). """
self.rank = None
""" Specify preferred order of use (1 = highest).
Type `int`. """
self.system = None
""" phone | fax | email | pager | url | sms | other.
Type `str`. """
self.use = None
""" home | work | temp | old | mobile - purpose of this contact point.
Type `str`. """
self.value = None
""" The actual contact point details.
Type `str`. """
super(ContactPoint, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ContactPoint, self).elementProperties()
js.extend([
("period", "period", period.Period, False, None, False),
("rank", "rank", int, False, None, False),
("system", "system", str, False, None, False),
("use", "use", str, False, None, False),
("value", "value", str, False, None, False),
])
return js
import sys
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
| bsd-3-clause |
sanjeevtripurari/hue | desktop/core/ext-py/Django-1.6.10/django/middleware/http.py | 225 | 1670 | from django.utils.http import http_date, parse_http_date_safe
class ConditionalGetMiddleware(object):
"""
Handles conditional GET operations. If the response has a ETag or
Last-Modified header, and the request has If-None-Match or
If-Modified-Since, the response is replaced by an HttpNotModified.
Also sets the Date and Content-Length response-headers.
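
    To enable it (Django 1.6-era settings), include it in MIDDLEWARE_CLASSES,
    e.g.:

        MIDDLEWARE_CLASSES = (
            'django.middleware.http.ConditionalGetMiddleware',
            # ...
        )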
"""
def process_response(self, request, response):
response['Date'] = http_date()
if not response.streaming and not response.has_header('Content-Length'):
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
if_none_match = request.META.get('HTTP_IF_NONE_MATCH')
if if_none_match == response['ETag']:
# Setting the status is enough here. The response handling path
# automatically removes content for this status code (in
# http.conditional_content_removal()).
response.status_code = 304
if response.has_header('Last-Modified'):
if_modified_since = request.META.get('HTTP_IF_MODIFIED_SINCE')
if if_modified_since is not None:
if_modified_since = parse_http_date_safe(if_modified_since)
if if_modified_since is not None:
last_modified = parse_http_date_safe(response['Last-Modified'])
if last_modified is not None and last_modified <= if_modified_since:
# Setting the status code is enough here (same reasons as
# above).
response.status_code = 304
return response
| apache-2.0 |
csrwng/kubernetes | hack/boilerplate/boilerplate.py | 93 | 7447 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
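# Typical invocations (paths are illustrative; run from the repo root):
#   hack/boilerplate/boilerplate.py                  # check every supported file
#   hack/boilerplate/boilerplate.py -v some/file.go  # explain a single failure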
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
def is_generated_file(filename, data, regexs):
for d in skipped_ungenerated_files:
if d in filename:
return False
p = regexs["generated"]
return p.search(data)
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
# determine if the file is automatically generated
generated = is_generated_file(filename, data, regexs)
basename = os.path.basename(filename)
extension = file_extension(filename)
if generated:
if extension == "go":
extension = "generatego"
elif extension == "bzl":
extension = "generatebzl"
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove extra content from the top of files
if extension == "go" or extension == "generatego":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
elif extension in ["sh", "py"]:
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
if generated:
                print('File %s has the YEAR field, but it should not be in a generated file' %
                      filename, file=verbose_out)
            else:
                print('File %s has the YEAR field, but is missing the year' %
                      filename, file=verbose_out)
return False
if not generated:
# Replace all occurrences of the regex "2014|2015|2016|2017|2018" with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" %
filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
"vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
"staging/src/k8s.io/kubectl/pkg/generated/bindata.go"]
# list all the files that contain 'DO NOT EDIT' but are not generated
skipped_ungenerated_files = [
'hack/lib/swagger.sh', 'hack/boilerplate/boilerplate.py']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def get_dates():
years = datetime.datetime.now().year
return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile('YEAR')
    # get_dates returns a regex matching 2014 through the current year,
    # e.g. "(2014|2015|2016|2017|2018)"; company holder names can be anything
regexs["date"] = re.compile(get_dates())
# strip // +build \n\n build constraints
regexs["go_build_constraints"] = re.compile(
r"^(// \+build.*\n)+\n", re.MULTILINE)
# strip #!.* from scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
# Search for generated files
regexs["generated"] = re.compile('DO NOT EDIT')
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 |
goldeneye-source/ges-python | lib/test/test_cmd_line.py | 72 | 19246 | # Tests invocation of the interpreter with various command line arguments
# Most tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.support, unittest
import os
import shutil
import sys
import subprocess
import tempfile
from test.script_helper import (spawn_python, kill_python, assert_python_ok,
assert_python_failure)
# XXX (ncoghlan): Move to script_helper and make consistent with run_python
def _kill_python_and_exit_code(p):
data = kill_python(p)
returncode = p.wait()
return data, returncode
class CmdLineTest(unittest.TestCase):
def test_directories(self):
assert_python_failure('.')
assert_python_failure('< .')
def verify_valid_flag(self, cmd_line):
rc, out, err = assert_python_ok(*cmd_line)
self.assertTrue(out == b'' or out.endswith(b'\n'))
self.assertNotIn(b'Traceback', out)
self.assertNotIn(b'Traceback', err)
def test_optimize(self):
self.verify_valid_flag('-O')
self.verify_valid_flag('-OO')
def test_site_flag(self):
self.verify_valid_flag('-S')
def test_usage(self):
rc, out, err = assert_python_ok('-h')
self.assertIn(b'usage', out)
def test_version(self):
version = ('Python %d.%d' % sys.version_info[:2]).encode("ascii")
for switch in '-V', '--version':
rc, out, err = assert_python_ok(switch)
self.assertFalse(err.startswith(version))
self.assertTrue(out.startswith(version))
def test_verbose(self):
# -v causes imports to write to stderr. If the write to
# stderr itself causes an import to happen (for the output
# codec), a recursion loop can occur.
rc, out, err = assert_python_ok('-v')
self.assertNotIn(b'stack overflow', err)
rc, out, err = assert_python_ok('-vv')
self.assertNotIn(b'stack overflow', err)
def test_xoptions(self):
def get_xoptions(*args):
# use subprocess module directly because test.script_helper adds
# "-X faulthandler" to the command line
args = (sys.executable, '-E') + args
args += ('-c', 'import sys; print(sys._xoptions)')
out = subprocess.check_output(args)
opts = eval(out.splitlines()[0])
return opts
opts = get_xoptions()
self.assertEqual(opts, {})
opts = get_xoptions('-Xa', '-Xb=c,d=e')
self.assertEqual(opts, {'a': True, 'b': 'c,d=e'})
def test_showrefcount(self):
def run_python(*args):
# this is similar to assert_python_ok but doesn't strip
# the refcount from stderr. It can be replaced once
# assert_python_ok stops doing that.
cmd = [sys.executable]
cmd.extend(args)
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
p.stdout.close()
p.stderr.close()
rc = p.returncode
self.assertEqual(rc, 0)
return rc, out, err
code = 'import sys; print(sys._xoptions)'
# normally the refcount is hidden
rc, out, err = run_python('-c', code)
self.assertEqual(out.rstrip(), b'{}')
self.assertEqual(err, b'')
# "-X showrefcount" shows the refcount, but only in debug builds
rc, out, err = run_python('-X', 'showrefcount', '-c', code)
self.assertEqual(out.rstrip(), b"{'showrefcount': True}")
if hasattr(sys, 'gettotalrefcount'): # debug build
self.assertRegex(err, br'^\[\d+ refs, \d+ blocks\]')
else:
self.assertEqual(err, b'')
def test_run_module(self):
# Test expected operation of the '-m' switch
# Switch needs an argument
assert_python_failure('-m')
# Check we get an error for a nonexistent module
assert_python_failure('-m', 'fnord43520xyz')
# Check the runpy module also gives an error for
# a nonexistent module
assert_python_failure('-m', 'runpy', 'fnord43520xyz')
# All good if module is located and run successfully
assert_python_ok('-m', 'timeit', '-n', '1')
def test_run_module_bug1764407(self):
# -m and -i need to play well together
# Runs the timeit module and checks the __main__
# namespace has been populated appropriately
p = spawn_python('-i', '-m', 'timeit', '-n', '1')
p.stdin.write(b'Timer\n')
p.stdin.write(b'exit()\n')
data = kill_python(p)
self.assertTrue(data.find(b'1 loop') != -1)
self.assertTrue(data.find(b'__main__.Timer') != -1)
def test_run_code(self):
# Test expected operation of the '-c' switch
# Switch needs an argument
assert_python_failure('-c')
# Check we get an error for an uncaught exception
assert_python_failure('-c', 'raise Exception')
# All good if execution is successful
assert_python_ok('-c', 'pass')
@unittest.skipUnless(test.support.FS_NONASCII, 'need support.FS_NONASCII')
def test_non_ascii(self):
# Test handling of non-ascii data
command = ("assert(ord(%r) == %s)"
% (test.support.FS_NONASCII, ord(test.support.FS_NONASCII)))
assert_python_ok('-c', command)
# On Windows, pass bytes to subprocess doesn't test how Python decodes the
# command line, but how subprocess does decode bytes to unicode. Python
# doesn't decode the command line because Windows provides directly the
# arguments as unicode (using wmain() instead of main()).
@unittest.skipIf(sys.platform == 'win32',
'Windows has a native unicode API')
def test_undecodable_code(self):
undecodable = b"\xff"
env = os.environ.copy()
# Use C locale to get ascii for the locale encoding
env['LC_ALL'] = 'C'
code = (
b'import locale; '
b'print(ascii("' + undecodable + b'"), '
b'locale.getpreferredencoding())')
p = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env)
stdout, stderr = p.communicate()
if p.returncode == 1:
# _Py_char2wchar() decoded b'\xff' as '\udcff' (b'\xff' is not
# decodable from ASCII) and run_command() failed on
# PyUnicode_AsUTF8String(). This is the expected behaviour on
# Linux.
pattern = b"Unable to decode the command from the command line:"
elif p.returncode == 0:
# _Py_char2wchar() decoded b'\xff' as '\xff' even if the locale is
# C and the locale encoding is ASCII. It occurs on FreeBSD, Solaris
# and Mac OS X.
pattern = b"'\\xff' "
# The output is followed by the encoding name, an alias to ASCII.
# Examples: "US-ASCII" or "646" (ISO 646, on Solaris).
else:
raise AssertionError("Unknown exit code: %s, output=%a" % (p.returncode, stdout))
if not stdout.startswith(pattern):
raise AssertionError("%a doesn't start with %a" % (stdout, pattern))
@unittest.skipUnless(sys.platform == 'darwin', 'test specific to Mac OS X')
def test_osx_utf8(self):
def check_output(text):
decoded = text.decode('utf-8', 'surrogateescape')
expected = ascii(decoded).encode('ascii') + b'\n'
env = os.environ.copy()
# C locale gives ASCII locale encoding, but Python uses UTF-8
# to parse the command line arguments on Mac OS X
env['LC_ALL'] = 'C'
p = subprocess.Popen(
(sys.executable, "-c", "import sys; print(ascii(sys.argv[1]))", text),
stdout=subprocess.PIPE,
env=env)
stdout, stderr = p.communicate()
self.assertEqual(stdout, expected)
self.assertEqual(p.returncode, 0)
# test valid utf-8
text = 'e:\xe9, euro:\u20ac, non-bmp:\U0010ffff'.encode('utf-8')
check_output(text)
# test invalid utf-8
text = (
b'\xff' # invalid byte
b'\xc3\xa9' # valid utf-8 character
b'\xc3\xff' # invalid byte sequence
b'\xed\xa0\x80' # lone surrogate character (invalid)
)
check_output(text)
def test_unbuffered_output(self):
# Test expected operation of the '-u' switch
for stream in ('stdout', 'stderr'):
# Binary is unbuffered
code = ("import os, sys; sys.%s.buffer.write(b'x'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data, b'x', "binary %s not unbuffered" % stream)
# Text is line-buffered
code = ("import os, sys; sys.%s.write('x\\n'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data.strip(), b'x',
"text %s not line-buffered" % stream)
def test_unbuffered_input(self):
# sys.stdin still works with '-u'
code = ("import sys; sys.stdout.write(sys.stdin.read(1))")
p = spawn_python('-u', '-c', code)
p.stdin.write(b'x')
p.stdin.flush()
data, rc = _kill_python_and_exit_code(p)
self.assertEqual(rc, 0)
self.assertTrue(data.startswith(b'x'), data)
def test_large_PYTHONPATH(self):
path1 = "ABCDE" * 100
path2 = "FGHIJ" * 100
path = path1 + os.pathsep + path2
code = """if 1:
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
rc, out, err = assert_python_ok('-S', '-c', code,
PYTHONPATH=path)
self.assertIn(path1.encode('ascii'), out)
self.assertIn(path2.encode('ascii'), out)
def test_empty_PYTHONPATH_issue16309(self):
# On Posix, it is documented that setting PATH to the
# empty string is equivalent to not setting PATH at all,
# which is an exception to the rule that in a string like
# "/bin::/usr/bin" the empty string in the middle gets
# interpreted as '.'
code = """if 1:
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
rc1, out1, err1 = assert_python_ok('-c', code, PYTHONPATH="")
rc2, out2, err2 = assert_python_ok('-c', code, __isolated=False)
        # according to the POSIX specification, outputs should be equal
        # for empty and unset PYTHONPATH
self.assertEqual(out1, out2)
def test_displayhook_unencodable(self):
for encoding in ('ascii', 'latin-1', 'utf-8'):
env = os.environ.copy()
env['PYTHONIOENCODING'] = encoding
p = subprocess.Popen(
[sys.executable, '-i'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
# non-ascii, surrogate, non-BMP printable, non-BMP unprintable
text = "a=\xe9 b=\uDC80 c=\U00010000 d=\U0010FFFF"
p.stdin.write(ascii(text).encode('ascii') + b"\n")
p.stdin.write(b'exit()\n')
data = kill_python(p)
escaped = repr(text).encode(encoding, 'backslashreplace')
self.assertIn(escaped, data)
def check_input(self, code, expected):
with tempfile.NamedTemporaryFile("wb+") as stdin:
sep = os.linesep.encode('ASCII')
stdin.write(sep.join((b'abc', b'def')))
stdin.flush()
stdin.seek(0)
with subprocess.Popen(
(sys.executable, "-c", code),
stdin=stdin, stdout=subprocess.PIPE) as proc:
stdout, stderr = proc.communicate()
self.assertEqual(stdout.rstrip(), expected)
def test_stdin_readline(self):
# Issue #11272: check that sys.stdin.readline() replaces '\r\n' by '\n'
# on Windows (sys.stdin is opened in binary mode)
self.check_input(
"import sys; print(repr(sys.stdin.readline()))",
b"'abc\\n'")
def test_builtin_input(self):
# Issue #11272: check that input() strips newlines ('\n' or '\r\n')
self.check_input(
"print(repr(input()))",
b"'abc'")
def test_output_newline(self):
# Issue 13119 Newline for print() should be \r\n on Windows.
code = """if 1:
import sys
print(1)
print(2)
print(3, file=sys.stderr)
print(4, file=sys.stderr)"""
rc, out, err = assert_python_ok('-c', code)
if sys.platform == 'win32':
self.assertEqual(b'1\r\n2\r\n', out)
self.assertEqual(b'3\r\n4', err)
else:
self.assertEqual(b'1\n2\n', out)
self.assertEqual(b'3\n4', err)
    def test_unmatched_quote(self):
# Issue #10206: python program starting with unmatched quote
# spewed spaces to stdout
rc, out, err = assert_python_failure('-c', "'")
self.assertRegex(err.decode('ascii', 'ignore'), 'SyntaxError')
self.assertEqual(b'', out)
def test_stdout_flush_at_shutdown(self):
# Issue #5319: if stdout.flush() fails at shutdown, an error should
# be printed out.
code = """if 1:
import os, sys
sys.stdout.write('x')
os.close(sys.stdout.fileno())"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', out)
self.assertRegex(err.decode('ascii', 'ignore'),
'Exception ignored in.*\nOSError: .*')
def test_closed_stdout(self):
# Issue #13444: if stdout has been explicitly closed, we should
# not attempt to flush it at shutdown.
code = "import sys; sys.stdout.close()"
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', err)
# Issue #7111: Python should work without standard streams
@unittest.skipIf(os.name != 'posix', "test needs POSIX semantics")
def _test_no_stdio(self, streams):
code = """if 1:
import os, sys
for i, s in enumerate({streams}):
if getattr(sys, s) is not None:
os._exit(i + 1)
os._exit(42)""".format(streams=streams)
def preexec():
if 'stdin' in streams:
os.close(0)
if 'stdout' in streams:
os.close(1)
if 'stderr' in streams:
os.close(2)
p = subprocess.Popen(
[sys.executable, "-E", "-c", code],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec)
out, err = p.communicate()
self.assertEqual(test.support.strip_python_stderr(err), b'')
self.assertEqual(p.returncode, 42)
def test_no_stdin(self):
self._test_no_stdio(['stdin'])
def test_no_stdout(self):
self._test_no_stdio(['stdout'])
def test_no_stderr(self):
self._test_no_stdio(['stderr'])
def test_no_std_streams(self):
self._test_no_stdio(['stdin', 'stdout', 'stderr'])
def test_hash_randomization(self):
# Verify that -R enables hash randomization:
self.verify_valid_flag('-R')
hashes = []
for i in range(2):
code = 'print(hash("spam"))'
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
hashes.append(out)
self.assertNotEqual(hashes[0], hashes[1])
# Verify that sys.flags contains hash_randomization
code = 'import sys; print("random is", sys.flags.hash_randomization)'
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertIn(b'random is 1', out)
def test_del___main__(self):
# Issue #15001: PyRun_SimpleFileExFlags() did crash because it kept a
# borrowed reference to the dict of __main__ module and later modify
# the dict whereas the module was destroyed
filename = test.support.TESTFN
self.addCleanup(test.support.unlink, filename)
with open(filename, "w") as script:
print("import sys", file=script)
print("del sys.modules['__main__']", file=script)
assert_python_ok(filename)
def test_unknown_options(self):
rc, out, err = assert_python_failure('-E', '-z')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
# Add "without='-E'" to prevent _assert_python to append -E
# to env_vars and change the output of stderr
rc, out, err = assert_python_failure('-z', without='-E')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
rc, out, err = assert_python_failure('-a', '-z', without='-E')
self.assertIn(b'Unknown option: -a', err)
# only the first unknown option is reported
self.assertNotIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -a'), 1)
self.assertEqual(b'', out)
def test_isolatedmode(self):
self.verify_valid_flag('-I')
self.verify_valid_flag('-IEs')
rc, out, err = assert_python_ok('-I', '-c',
'from sys import flags as f; '
'print(f.no_user_site, f.ignore_environment, f.isolated)',
            # dummyvar to prevent an extraneous -E
dummyvar="")
self.assertEqual(out.strip(), b'1 1 1')
with test.support.temp_cwd() as tmpdir:
fake = os.path.join(tmpdir, "uuid.py")
main = os.path.join(tmpdir, "main.py")
with open(fake, "w") as f:
f.write("raise RuntimeError('isolated mode test')\n")
with open(main, "w") as f:
f.write("import uuid\n")
f.write("print('ok')\n")
self.assertRaises(subprocess.CalledProcessError,
subprocess.check_output,
[sys.executable, main], cwd=tmpdir,
stderr=subprocess.DEVNULL)
out = subprocess.check_output([sys.executable, "-I", main],
cwd=tmpdir)
self.assertEqual(out.strip(), b"ok")
def test_main():
test.support.run_unittest(CmdLineTest)
test.support.reap_children()
if __name__ == "__main__":
test_main()
| gpl-3.0 |
stefanv/aandete | app/lib/beaker/middleware.py | 3 | 6482 | import warnings
try:
from paste.registry import StackedObjectProxy
beaker_session = StackedObjectProxy(name="Beaker Session")
beaker_cache = StackedObjectProxy(name="Cache Manager")
except:
beaker_cache = None
beaker_session = None
from beaker.cache import CacheManager
from beaker.session import Session, SessionObject
from beaker.util import coerce_cache_params, coerce_session_params, \
parse_cache_config_options
class CacheMiddleware(object):
cache = beaker_cache
def __init__(self, app, config=None, environ_key='beaker.cache', **kwargs):
"""Initialize the Cache Middleware
The Cache middleware will make a CacheManager instance available
every request under the ``environ['beaker.cache']`` key by
default. The location in environ can be changed by setting
``environ_key``.
``config``
dict All settings should be prefixed by 'cache.'. This
method of passing variables is intended for Paste and other
setups that accumulate multiple component settings in a
single dictionary. If config contains *no cache. prefixed
args*, then *all* of the config options will be used to
            initialize the Cache objects.
``environ_key``
Location where the Cache instance will keyed in the WSGI
environ
``**kwargs``
All keyword arguments are assumed to be cache settings and
will override any settings found in ``config``
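
        A minimal wiring sketch (the option value is illustrative):

            app = CacheMiddleware(app, {'cache.type': 'memory'})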
"""
self.app = app
config = config or {}
self.options = {}
# Update the options with the parsed config
self.options.update(parse_cache_config_options(config))
# Add any options from kwargs, but leave out the defaults this
# time
self.options.update(
parse_cache_config_options(kwargs, include_defaults=False))
# Assume all keys are intended for cache if none are prefixed with
# 'cache.'
if not self.options and config:
self.options = config
self.options.update(kwargs)
self.cache_manager = CacheManager(**self.options)
self.environ_key = environ_key
def __call__(self, environ, start_response):
if environ.get('paste.registry'):
if environ['paste.registry'].reglist:
environ['paste.registry'].register(self.cache,
self.cache_manager)
environ[self.environ_key] = self.cache_manager
return self.app(environ, start_response)
class SessionMiddleware(object):
session = beaker_session
def __init__(self, wrap_app, config=None, environ_key='beaker.session',
**kwargs):
"""Initialize the Session Middleware
The Session middleware will make a lazy session instance
available every request under the ``environ['beaker.session']``
key by default. The location in environ can be changed by
setting ``environ_key``.
``config``
dict All settings should be prefixed by 'session.'. This
method of passing variables is intended for Paste and other
setups that accumulate multiple component settings in a
single dictionary. If config contains *no session. prefixed
args*, then *all* of the config options will be used to
            initialize the Session objects.
``environ_key``
Location where the Session instance will keyed in the WSGI
environ
``**kwargs``
All keyword arguments are assumed to be session settings and
will override any settings found in ``config``
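
        A minimal wiring sketch (the option values are illustrative):

            app = SessionMiddleware(app, {
                'session.type': 'file',
                'session.data_dir': './session_data',
            })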
"""
config = config or {}
# Load up the default params
self.options = dict(invalidate_corrupt=True, type=None,
data_dir=None, key='beaker.session.id',
timeout=None, save_accessed_time=True, secret=None,
log_file=None)
# Pull out any config args meant for beaker session. if there are any
for dct in [config, kwargs]:
for key, val in dct.items():
if key.startswith('beaker.session.'):
self.options[key[15:]] = val
if key.startswith('session.'):
self.options[key[8:]] = val
if key.startswith('session_'):
warnings.warn('Session options should start with session. '
'instead of session_.', DeprecationWarning, 2)
self.options[key[8:]] = val
# Coerce and validate session params
coerce_session_params(self.options)
# Assume all keys are intended for session if none are prefixed with
# 'session.'
if not self.options and config:
self.options = config
self.options.update(kwargs)
self.wrap_app = self.app = wrap_app
self.environ_key = environ_key
def __call__(self, environ, start_response):
session = SessionObject(environ, **self.options)
if environ.get('paste.registry'):
if environ['paste.registry'].reglist:
environ['paste.registry'].register(self.session, session)
environ[self.environ_key] = session
environ['beaker.get_session'] = self._get_session
if 'paste.testing_variables' in environ and 'webtest_varname' in self.options:
environ['paste.testing_variables'][self.options['webtest_varname']] = session
def session_start_response(status, headers, exc_info=None):
if session.accessed():
session.persist()
if session.__dict__['_headers']['set_cookie']:
cookie = session.__dict__['_headers']['cookie_out']
if cookie:
headers.append(('Set-cookie', cookie))
return start_response(status, headers, exc_info)
return self.wrap_app(environ, session_start_response)
def _get_session(self):
return Session({}, use_cookies=False, **self.options)
def session_filter_factory(global_conf, **kwargs):
def filter(app):
return SessionMiddleware(app, global_conf, **kwargs)
return filter
def session_filter_app_factory(app, global_conf, **kwargs):
return SessionMiddleware(app, global_conf, **kwargs)
| bsd-3-clause |
zooniverse/aggregation | experimental/clusteringAlg/adaptiveDBSCAN.py | 2 | 4734 | #!/usr/bin/env python
__author__ = 'greg'
from sklearn.cluster import DBSCAN
import numpy as np
import math
def dist(c1,c2):
return math.sqrt((c1[0]-c2[0])**2 + (c1[1]-c2[1])**2)
class CannotSplit(Exception):
def __init__(self,value):
self.value = value
def __str__(self):
return ""
samples_needed = 3
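# Rough usage sketch (coordinates and user ids are illustrative): each point
# in XYpts is paired elementwise with the id of the user who placed it, e.g.
#   centers = adaptiveDBSCAN([(10.2, 31.5), (10.4, 31.6), (55.0, 12.0)],
#                            ['alice', 'bob', 'carol'])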
def adaptiveDBSCAN(XYpts,user_ids):
if XYpts == []:
return []
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
#increase the epsilon until we don't have any nearby clusters corresponding to non-overlapping
#sets of users
X = np.array(XYpts)
#for epsilon in [5,10,15,20,25,30]:
for first_epsilon in [100,200,300,400]:
db = DBSCAN(eps=first_epsilon, min_samples=samples_needed).fit(X)
labels = db.labels_
pts_in_each_cluster = []
users_in_each_cluster = []
cluster_centers = []
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
cluster_centers.append((np.mean(xSet),np.mean(ySet)))
pts_in_each_cluster.append(pts_in_cluster[:])
users_in_each_cluster.append([u for u,l in zip(user_ids,labels) if l == k])
#do we have any adjacent clusters with non-overlapping sets of users
#if so, we should merge them by increasing the epsilon value
cluster_compare = []
        for i, (c1, users) in enumerate(zip(cluster_centers, users_in_each_cluster)):
            # compare each pair of clusters once; the inner loop no longer
            # reuses (and clobbers) the outer loop variable
            for c2, users2 in zip(cluster_centers[i+1:], users_in_each_cluster[i+1:]):
                overlappingUsers = [u for u in users if u in users2]
                cluster_compare.append((dist(c1, c2), overlappingUsers))
cluster_compare.sort(key = lambda x:x[0])
needToMerge = [] in [c[1] for c in cluster_compare[:10]]
if not(needToMerge):
break
#print epsilon
#print [c[1] for c in cluster_compare[:10]]
centers_to_return = []
assert not(needToMerge)
#do we need to split any clusters?
for cluster_index in range(len(cluster_centers)):
#print "splitting"
needToSplit = (sorted(users_in_each_cluster[cluster_index]) != sorted(list(set(users_in_each_cluster[cluster_index]))))
if needToSplit:
subcluster_centers = []
stillToSplit = []
X = np.array(pts_in_each_cluster[cluster_index])
#for epsilon in [30,25,20,15,10,5,1,0.1,0.01]:
for second_epsilon in range(200,1,-2):#[400,300,200,100,80,75,65,60,50,25,24,23,22,21,20,19,18,17,16,15,14,13,10,5,1]:
db = DBSCAN(eps=second_epsilon, min_samples=samples_needed).fit(X)
labels = db.labels_
subcluster_centers = []
needToSplit = False
for k in sorted(set(labels)):
if k == -1:
continue
class_member_mask = (labels == k)
users_in_subcluster = [u for u,l in zip(users_in_each_cluster[cluster_index],labels) if l == k]
needToSplit = (sorted(users_in_subcluster) != sorted(list(set(users_in_subcluster))))
if needToSplit:
stillToSplit = list(X[class_member_mask])
break
pts_in_cluster = list(X[class_member_mask])
xSet,ySet = zip(*pts_in_cluster)
subcluster_centers.append((np.mean(xSet),np.mean(ySet)))
if not(needToSplit):
break
if needToSplit:
print "second is " + str(second_epsilon)
print stillToSplit
for i in range(len(stillToSplit)):
p1 = stillToSplit[i]
for j in range(len(stillToSplit[i+1:])):
p2 = stillToSplit[j+i+1]
print math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2),
#print (i,j+i+1),
print
print X
print users_in_each_cluster[cluster_index]
raise CannotSplit(pts_in_each_cluster[cluster_index])
centers_to_return.extend(subcluster_centers)
#if needToSplit:
# print pts_in_each_cluster[cluster_index]
# print users_in_each_cluster[cluster_index]
#else:
else:
centers_to_return.append(cluster_centers[cluster_index])
    return centers_to_return
 | apache-2.0 |
splunk/splunk-app-twitter | twitter2/bin/requests/packages/charade/langthaimodel.py | 206 | 11475 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
| apache-2.0 |
Cojacfar/Maker | comm/lib/python2.7/site-packages/django/template/loaders/cached.py | 114 | 2619 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin
from django.utils.encoding import force_bytes
class Loader(BaseLoader):
is_usable = True
def __init__(self, loaders):
self.template_cache = {}
self._loaders = loaders
self._cached_loaders = []
@property
def loaders(self):
# Resolve loaders on demand to avoid circular imports
if not self._cached_loaders:
# Set self._cached_loaders atomically. Otherwise, another thread
# could see an incomplete list. See #17303.
cached_loaders = []
for loader in self._loaders:
cached_loaders.append(find_template_loader(loader))
self._cached_loaders = cached_loaders
return self._cached_loaders
def find_template(self, name, dirs=None):
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
return (template, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
key = template_name
if template_dirs:
# If template directories were specified, use a hash to differentiate
key = '-'.join([template_name, hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()])
try:
template = self.template_cache[key]
except KeyError:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = get_template_from_string(template, origin, template_name)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
return template, origin
self.template_cache[key] = template
return template, None
def reset(self):
"Empty the template cache."
self.template_cache.clear()
| gpl-2.0 |
Guneet-Dhillon/mxnet | example/gluon/data.py | 8 | 5477 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
""" data iterator for mnist """
import os
import random
import sys
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, "../../tests/python/common"))
import get_data
import mxnet as mx
def mnist_iterator(batch_size, input_shape):
"""return train and val iterators for mnist"""
# download data
get_data.GetMNIST_ubyte()
    flat = len(input_shape) != 3  # flatten unless the input is 3-d (C, H, W)
train_dataiter = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
input_shape=input_shape,
batch_size=batch_size,
shuffle=True,
flat=flat)
val_dataiter = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
input_shape=input_shape,
batch_size=batch_size,
flat=flat)
return (train_dataiter, val_dataiter)
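# Illustrative usage (a sketch; assumes the MNIST files download into ./data):
# train_iter, val_iter = mnist_iterator(batch_size=100, input_shape=(784,))
# for batch in train_iter:
#     pass  # batch.data[0] and batch.label[0] hold the current mini-batch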
def cifar10_iterator(batch_size, data_shape, resize=-1):
get_data.GetCifar10()
train = mx.io.ImageRecordIter(
path_imgrec = "data/cifar/train.rec",
# mean_img = "data/cifar/mean.bin",
resize = resize,
data_shape = data_shape,
batch_size = batch_size,
rand_crop = True,
rand_mirror = True)
val = mx.io.ImageRecordIter(
path_imgrec = "data/cifar/test.rec",
# mean_img = "data/cifar/mean.bin",
resize = resize,
rand_crop = False,
rand_mirror = False,
data_shape = data_shape,
batch_size = batch_size)
return train, val
class DummyIter(mx.io.DataIter):
def __init__(self, batch_size, data_shape, batches = 5):
super(DummyIter, self).__init__(batch_size)
self.data_shape = (batch_size,) + data_shape
self.label_shape = (batch_size,)
self.provide_data = [('data', self.data_shape)]
self.provide_label = [('softmax_label', self.label_shape)]
self.batch = mx.io.DataBatch(data=[mx.nd.zeros(self.data_shape)],
label=[mx.nd.zeros(self.label_shape)])
self._batches = 0
self.batches = batches
def next(self):
if self._batches < self.batches:
self._batches += 1
return self.batch
else:
self._batches = 0
raise StopIteration
def dummy_iterator(batch_size, data_shape):
return DummyIter(batch_size, data_shape), DummyIter(batch_size, data_shape)
class ImagePairIter(mx.io.DataIter):
def __init__(self, path, data_shape, label_shape, batch_size=64, flag=0, input_aug=None, target_aug=None):
super(ImagePairIter, self).__init__(batch_size)
self.data_shape = (batch_size,) + data_shape
self.label_shape = (batch_size,) + label_shape
self.input_aug = input_aug
self.target_aug = target_aug
self.provide_data = [('data', self.data_shape)]
self.provide_label = [('label', self.label_shape)]
is_image_file = lambda fn: any(fn.endswith(ext) for ext in [".png", ".jpg", ".jpeg"])
self.filenames = [os.path.join(path, x) for x in os.listdir(path) if is_image_file(x)]
self.count = 0
self.flag = flag
random.shuffle(self.filenames)
def next(self):
from PIL import Image
if self.count + self.batch_size <= len(self.filenames):
data = []
label = []
for i in range(self.batch_size):
fn = self.filenames[self.count]
self.count += 1
image = Image.open(fn).convert('YCbCr').split()[0]
if image.size[0] > image.size[1]:
image = image.transpose(Image.TRANSPOSE)
image = mx.nd.expand_dims(mx.nd.array(image), axis=2)
target = image.copy()
for aug in self.input_aug:
image = aug(image)[0]
for aug in self.target_aug:
target = aug(target)[0]
data.append(image)
label.append(target)
data = mx.nd.concat(*[mx.nd.expand_dims(d, axis=0) for d in data], dim=0)
label = mx.nd.concat(*[mx.nd.expand_dims(d, axis=0) for d in label], dim=0)
data = [mx.nd.transpose(data, axes=(0, 3, 1, 2)).astype('float32')/255]
label = [mx.nd.transpose(label, axes=(0, 3, 1, 2)).astype('float32')/255]
return mx.io.DataBatch(data=data, label=label)
else:
raise StopIteration
def reset(self):
self.count = 0
random.shuffle(self.filenames)
| apache-2.0 |
DarkFenX/Pyfa | eos/db/migrations/upgrade12.py | 4 | 13947 | """
Migration 12
- Converts modules based on March 2016 Module Tiericide
Some modules have been unpublished (and the attributes of unpublished modules
are removed from the database), which causes pyfa to crash. We therefore
replace these modules with their replacements.
"""
CONVERSIONS = {
16457: ( # Crosslink Compact Ballistic Control System
16459, # Muon Coil Bolt Array I
16461, # Multiphasic Bolt Array I
16463, # 'Pandemonium' Ballistic Enhancement
),
5281 : ( # Coadjunct Scoped Remote Sensor Booster
7218, # Piercing ECCM Emitter I
),
5365 : ( # Cetus Scoped Burst Jammer
5359, # 1Z-3 Subversive ECM Eruption
),
1973 : ( # Sensor Booster I
1947, # ECCM - Radar I
2002, # ECCM - Ladar I
2003, # ECCM - Magnetometric I
2004, # ECCM - Gravimetric I
2005, # ECCM - Omni I
),
1951 : ( # 'Basic' Tracking Enhancer
6322, # Beta-Nought Tracking Mode
6323, # Azimuth Descalloping Tracking Enhancer
6324, # F-AQ Delay-Line Scan Tracking Subroutines
6321, # Beam Parallax Tracking Program
),
521 : ( # 'Basic' Damage Control
5829, # GLFF Containment Field
5831, # Interior Force Field Array
5835, # F84 Local Damage System
5833, # Systematic Damage Control
),
22925: ( # 'Bootleg' Remote Sensor Booster
22939, # 'Boss' Remote Sensor Booster
22941, # 'Entrepreneur' Remote Sensor Booster
),
5443 : ( # Faint Epsilon Scoped Warp Scrambler
5441, # Fleeting Progressive Warp Scrambler I
),
1963 : ( # Remote Sensor Booster I
1959, # ECCM Projector I
),
6325 : ( # Fourier Compact Tracking Enhancer
6326, # Sigma-Nought Tracking Mode I
6327, # Auto-Gain Control Tracking Enhancer I
6328, # F-aQ Phase Code Tracking Subroutines
),
21486: ( # 'Kindred' Gyrostabilizer
21488, # Monophonic Stabilization Actuator I
),
19927: ( # Hypnos Scoped Magnetometric ECM
9518, # Initiated Ion Field ECM I
),
10188: ( # 'Basic' Magnetic Field Stabilizer
11111, # Insulated Stabilizer Array
11109, # Linear Flux Stabilizer
11115, # Gauss Field Balancer
11113, # Magnetic Vortex Stabilizer
),
22919: ( # 'Monopoly' Magnetic Field Stabilizer
22917, # 'Capitalist' Magnetic Field Stabilizer I
),
5839 : ( # IFFA Compact Damage Control
5841, # Emergency Damage Control I
5843, # F85 Peripheral Damage System I
5837, # Pseudoelectron Containment Field I
),
522 : ( # 'Micro' Cap Battery
4747, # Micro Ld-Acid Capacitor Battery I
4751, # Micro Ohm Capacitor Reserve I
4745, # Micro F-4a Ld-Sulfate Capacitor Charge Unit
4749, # Micro Peroxide Capacitor Power Cell
3480, # Micro Capacitor Battery II
),
518 : ( # 'Basic' Gyrostabilizer
5915, # Lateral Gyrostabilizer
5919, # F-M2 Weapon Inertial Suspensor
5913, # Hydraulic Stabilization Actuator
5917, # Stabilized Weapon Mounts
),
19931: ( # Compulsive Scoped Multispectral ECM
19933, # 'Hypnos' Multispectral ECM I
),
5403 : ( # Faint Scoped Warp Disruptor
5401, # Fleeting Warp Disruptor I
),
23902: ( # 'Trebuchet' Heat Sink I
23900, # 'Mangonel' Heat Sink I
),
1893 : ( # 'Basic' Heat Sink
5845, # Heat Exhaust System
5856, # C3S Convection Thermal Radiator
5855, # 'Boreas' Coolant System
5854, # Stamped Heat Sink
),
6160 : ( # F-90 Compact Sensor Booster
20214, # Extra Radar ECCM Scanning Array I
20220, # Extra Ladar ECCM Scanning Array I
20226, # Extra Gravimetric ECCM Scanning Array I
20232, # Extra Magnetometric ECCM Scanning Array I
7948, # Gravimetric Positional ECCM Sensor System I
7964, # Radar Positional ECCM Sensor System I
7965, # Omni Positional ECCM Sensor System I
7966, # Ladar Positional ECCM Sensor System I
7970, # Magnetometric Positional ECCM Sensor System I
20218, # Conjunctive Radar ECCM Scanning Array I
20224, # Conjunctive Ladar ECCM Scanning Array I
20230, # Conjunctive Gravimetric ECCM Scanning Array I
20236, # Conjunctive Magnetometric ECCM Scanning Array I
6157, # Supplemental Scanning CPU I
),
23418: ( # 'Radical' Damage Control
22893, # 'Gonzo' Damage Control I
),
19952: ( # Umbra Scoped Radar ECM
9520, # 'Penumbra' White Noise ECM
),
1952 : ( # Sensor Booster II
2258, # ECCM - Omni II
2259, # ECCM - Gravimetric II
2260, # ECCM - Ladar II
2261, # ECCM - Magnetometric II
2262, # ECCM - Radar II
),
5282 : ( # Linked Enduring Sensor Booster
7219, # Scattering ECCM Projector I
),
1986 : ( # Signal Amplifier I
2579, # Gravimetric Backup Array I
2583, # Ladar Backup Array I
2587, # Magnetometric Backup Array I
2591, # Multi Sensor Backup Array I
4013, # RADAR Backup Array I
),
4871 : ( # Large Compact Pb-Acid Cap Battery
4875, # Large Ohm Capacitor Reserve I
4869, # Large F-4a Ld-Sulfate Capacitor Charge Unit
4873, # Large Peroxide Capacitor Power Cell
),
1964 : ( # Remote Sensor Booster II
1960, # ECCM Projector II
),
5933 : ( # Counterbalanced Compact Gyrostabilizer
5931, # Cross-Lateral Gyrostabilizer I
5935, # F-M3 Munition Inertial Suspensor
5929, # Pneumatic Stabilization Actuator I
),
4025 : ( # X5 Enduring Stasis Webifier
4029, # 'Langour' Drive Disruptor I
),
4027 : ( # Fleeting Compact Stasis Webifier
4031, # Patterned Stasis Web I
),
22937: ( # 'Enterprise' Remote Tracking Computer
22935, # 'Tycoon' Remote Tracking Computer
),
22929: ( # 'Marketeer' Tracking Computer
22927, # 'Economist' Tracking Computer I
),
1987 : ( # Signal Amplifier II
2580, # Gravimetric Backup Array II
2584, # Ladar Backup Array II
2588, # Magnetometric Backup Array II
2592, # Multi Sensor Backup Array II
4014, # RADAR Backup Array II
),
19939: ( # Enfeebling Scoped Ladar ECM
9522, # Faint Phase Inversion ECM I
),
5340 : ( # P-S Compact Remote Tracking Computer
5341, # 'Prayer' Remote Tracking Computer
),
19814: ( # Phased Scoped Target Painter
19808, # Partial Weapon Navigation
),
1949 : ( # 'Basic' Signal Amplifier
1946, # Basic RADAR Backup Array
1982, # Basic Ladar Backup Array
1983, # Basic Gravimetric Backup Array
1984, # Basic Magnetometric Backup Array
1985, # Basic Multi Sensor Backup Array
6193, # Emergency Magnetometric Scanners
6194, # Emergency Multi-Frequency Scanners
6202, # Emergency RADAR Scanners
6216, # Emergency Ladar Scanners
6217, # Emergency Gravimetric Scanners
6225, # Sealed RADAR Backup Cluster
6238, # Sealed Magnetometric Backup Cluster
6239, # Sealed Multi-Frequency Backup Cluster
6241, # Sealed Ladar Backup Cluster
6242, # Sealed Gravimetric Backup Cluster
6257, # Surplus RADAR Reserve Array
6258, # F-42 Reiterative RADAR Backup Sensors
6283, # Surplus Magnetometric Reserve Array
6284, # F-42 Reiterative Magnetometric Backup Sensors
6285, # Surplus Multi-Frequency Reserve Array
6286, # F-42 Reiterative Multi-Frequency Backup Sensors
6289, # Surplus Ladar Reserve Array
6290, # F-42 Reiterative Ladar Backup Sensors
6291, # Surplus Gravimetric Reserve Array
6292, # F-42 Reiterative Gravimetric Backup Sensors
6309, # Amplitude Signal Enhancer
6310, # 'Acolyth' Signal Booster
6311, # Type-E Discriminative Signal Augmentation
6312, # F-90 Positional Signal Amplifier
),
21527: ( # 'Firewall' Signal Amplifier
21521, # Gravimetric Firewall
21523, # Ladar Firewall
21525, # Magnetometric Firewall
21527, # Multi Sensor Firewall
21529, # RADAR Firewall
),
23416: ( # 'Peace' Large Remote Armor Repairer
None, # 'Pacifier' Large Remote Armor Repairer
),
6176 : ( # F-12 Enduring Tracking Computer
6174, # Monopulse Tracking Mechanism I
),
6159 : ( # Alumel-Wired Enduring Sensor Booster
7917, # Alumel Radar ECCM Sensor Array I
7918, # Alumel Ladar ECCM Sensor Array I
7922, # Alumel Gravimetric ECCM Sensor Array I
7926, # Alumel Omni ECCM Sensor Array I
7937, # Alumel Magnetometric ECCM Sensor Array I
7867, # Supplemental Ladar ECCM Scanning Array I
7869, # Supplemental Gravimetric ECCM Scanning Array I
7870, # Supplemental Omni ECCM Scanning Array I
7887, # Supplemental Radar ECCM Scanning Array I
7889, # Supplemental Magnetometric ECCM Scanning Array I
20216, # Incremental Radar ECCM Scanning Array I
20222, # Incremental Ladar ECCM Scanning Array I
20228, # Incremental Gravimetric ECCM Scanning Array I
20234, # Incremental Magnetometric ECCM Scanning Array I
7892, # Prototype ECCM Radar Sensor Cluster
7893, # Prototype ECCM Ladar Sensor Cluster
7895, # Prototype ECCM Gravimetric Sensor Cluster
7896, # Prototype ECCM Omni Sensor Cluster
7914, # Prototype ECCM Magnetometric Sensor Cluster
6158, # Prototype Sensor Booster
),
5849 : ( # Extruded Compact Heat Sink
5846, # Thermal Exhaust System I
5858, # C4S Coiled Circuit Thermal Radiator
5857, # 'Skadi' Coolant System I
),
22895: ( # 'Shady' Sensor Booster
22897, # 'Forger' ECCM - Magnetometric I
),
11105: ( # Vortex Compact Magnetic Field Stabilizer
11103, # Insulated Stabilizer Array I
11101, # Linear Flux Stabilizer I
11107, # Gauss Field Balancer I
),
22945: ( # 'Executive' Remote Sensor Dampener
22943, # 'Broker' Remote Sensor Dampener I
),
6173 : ( # Optical Compact Tracking Computer
6175, # 'Orion' Tracking CPU I
),
5279 : ( # F-23 Compact Remote Sensor Booster
7217, # Spot Pulsing ECCM I
7220, # Phased Muon ECCM Caster I
5280, # Connected Remote Sensor Booster
),
4787 : ( # Small Compact Pb-Acid Cap Battery
4791, # Small Ohm Capacitor Reserve I
4785, # Small F-4a Ld-Sulfate Capacitor Charge Unit
4789, # Small Peroxide Capacitor Power Cell
),
19946: ( # BZ-5 Scoped Gravimetric ECM
9519, # FZ-3 Subversive Spatial Destabilizer ECM
),
6073 : ( # Medium Compact Pb-Acid Cap Battery
6097, # Medium Ohm Capacitor Reserve I
6111, # Medium F-4a Ld-Sulfate Capacitor Charge Unit
6083, # Medium Peroxide Capacitor Power Cell
),
21484: ( # 'Full Duplex' Ballistic Control System
21482, # Ballistic 'Purge' Targeting System I
),
6296 : ( # F-89 Compact Signal Amplifier
6218, # Protected Gravimetric Backup Cluster I
6222, # Protected Ladar Backup Cluster I
6226, # Protected Magnetometric Backup Cluster I
6230, # Protected Multi-Frequency Backup Cluster I
6234, # Protected RADAR Backup Cluster I
6195, # Reserve Gravimetric Scanners
6199, # Reserve Ladar Scanners
6203, # Reserve Magnetometric Scanners
6207, # Reserve Multi-Frequency Scanners
6212, # Reserve RADAR Scanners
20238, # Secure Gravimetric Backup Cluster I
20244, # Secure Ladar Backup Cluster I
20250, # Secure Magnetometric Backup Cluster I
20260, # Secure Radar Backup Cluster I
6244, # F-43 Repetitive Gravimetric Backup Sensors
6252, # F-43 Repetitive Ladar Backup Sensors
6260, # F-43 Repetitive Magnetometric Backup Sensors
6268, # F-43 Repetitive Multi-Frequency Backup Sensors
6276, # F-43 Repetitive RADAR Backup Sensors
20240, # Shielded Gravimetric Backup Cluster I
20246, # Shielded Ladar Backup Cluster I
20252, # Shielded Magnetometric Backup Cluster I
20262, # Shielded Radar Backup Cluster I
6243, # Surrogate Gravimetric Reserve Array I
6251, # Surrogate Ladar Reserve Array I
6259, # Surrogate Magnetometric Reserve Array I
6267, # Surrogate Multi-Frequency Reserve Array I
6275, # Surrogate RADAR Reserve Array I
20242, # Warded Gravimetric Backup Cluster I
20248, # Warded Ladar Backup Cluster I
20254, # Warded Magnetometric Backup Cluster I
20264, # Warded Radar Backup Cluster I
6294, # 'Mendicant' Signal Booster I
6293, # Wavelength Signal Enhancer I
6295, # Type-D Attenuation Signal Augmentation
),
5302 : ( # Phased Muon Scoped Sensor Dampener
5300, # Indirect Scanning Dampening Unit I
),
}
def upgrade(saveddata_engine):
    # Convert modules
    for replacement_item, retired_items in CONVERSIONS.items():
        for retired_item in retired_items:
            if retired_item is None:
                # A None entry means the retired item's ID is unknown;
                # "itemID" = NULL never matches, so skip the no-op update.
                continue
            saveddata_engine.execute('UPDATE "modules" SET "itemID" = ? WHERE "itemID" = ?',
                                     (replacement_item, retired_item))
            saveddata_engine.execute('UPDATE "cargo" SET "itemID" = ? WHERE "itemID" = ?',
                                     (replacement_item, retired_item))
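# Illustrative invocation (a sketch; in pyfa the migration runner normally
# calls this -- the engine below is only an assumed stand-in):
# from sqlalchemy import create_engine
# upgrade(create_engine('sqlite:///saveddata.db'))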
| gpl-3.0 |
maxsocl/django | tests/queries/models.py | 36 | 16195 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
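# Illustrative queries against the custom default manager (hypothetical data):
#   ManagedModel.objects.all()         # only public objects tagged 't1'
#   ManagedModel.normal_manager.all()  # unfiltered access to every row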
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, null=True)
objectb = models.ForeignKey(ObjectB, null=True)
childobjecta = models.ForeignKey(ChildObjectA, null=True, related_name='ca_pk')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, null=True)
d = models.ForeignKey(ModelD)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, to_field='name')
responsibility = models.ForeignKey('Responsibility', to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, null=True)
b = models.ForeignKey(FK2, null=True)
c = models.ForeignKey(FK3, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter')
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph')
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, related_name='owner')
creator = models.ForeignKey(BaseUser, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, related_name='user')
def __str__(self):
return self.staff
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent)
class Person(models.Model):
name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company)
employee = models.ForeignKey(Person)
title = models.CharField(max_length=128)
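# Illustrative use of the explicit through model (hypothetical data):
#   Employment.objects.create(employer=company, employee=person, title='Engineer')
#   company.employees.all() then traverses the Employment table.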
# Bug #22429
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School)
class Classroom(models.Model):
school = models.ForeignKey(School)
students = models.ManyToManyField(Student, related_name='classroom')
class Ticket23605A(models.Model):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A)
modelc_fk = models.ForeignKey("Ticket23605C")
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
| bsd-3-clause |
tbinjiayou/Odoo | addons/procurement/wizard/__init__.py | 374 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import schedulers_all
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jirikuncar/invenio | invenio/legacy/webbasket/webinterface.py | 13 | 76050 | # This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2013, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebBasket Web Interface."""
__revision__ = "$Id$"
__lastupdated__ = """$Date$"""
from invenio.utils import apache
import os
import cgi
import urllib
from invenio.config import CFG_SITE_SECURE_URL, \
     CFG_ACCESS_CONTROL_LEVEL_SITE, \
     CFG_PREFIX, CFG_SITE_LANG
from invenio.base.globals import cfg
from invenio.base.i18n import gettext_set_language
from invenio.legacy.webpage import page
from invenio.legacy.webuser import getUid, page_not_authorized, isGuestUser
from invenio.legacy.webbasket.api import \
check_user_can_comment, \
check_sufficient_rights, \
perform_request_display, \
perform_request_search, \
create_guest_warning_box, \
create_basket_navtrail, \
perform_request_write_note, \
perform_request_save_note, \
perform_request_delete_note, \
perform_request_add_group, \
perform_request_edit, \
perform_request_edit_topic, \
perform_request_list_public_baskets, \
perform_request_unsubscribe, \
perform_request_subscribe, \
perform_request_display_public, \
perform_request_write_public_note, \
perform_request_save_public_note, \
delete_record, \
move_record, \
perform_request_add, \
perform_request_create_basket, \
perform_request_delete, \
wash_topic, \
wash_group, \
perform_request_export_xml, \
page_start, \
page_end
from invenio.legacy.webbasket.db_layer import get_basket_name, \
get_max_user_rights_on_basket
from invenio.utils.url import get_referer, redirect_to_url, make_canonical_urlargd
from invenio.ext.legacy.handler import wash_urlargd, WebInterfaceDirectory
from invenio.legacy.webstat.api import register_customevent
from invenio.ext.logging import register_exception
from invenio.legacy.webuser import collect_user_info
from invenio.modules.comments.api import check_user_can_attach_file_to_comments
from invenio.modules.access.engine import acc_authorize_action
from invenio.utils.html import is_html_text_editor_installed
from invenio.legacy.ckeditor.connector import process_CKEditor_upload, send_response
from invenio.legacy.bibdocfile.api import stream_file
class WebInterfaceBasketCommentsFiles(WebInterfaceDirectory):
"""Handle upload and access to files for comments in WebBasket.
The upload is currently only available through the CKEditor.
"""
def _lookup(self, component, path):
""" This handler is invoked for the dynamic URLs (for getting
and putting attachments) Eg:
/yourbaskets/attachments/get/31/652/5/file/myfile.pdf
/yourbaskets/attachments/get/31/552/5/image/myfigure.png
bskid/recid/uid/
/yourbaskets/attachments/put/31/550/
bskid/recid
"""
if component == 'get' and len(path) > 4:
bskid = path[0] # Basket id
recid = path[1] # Record id
uid = path[2] # uid of the submitter
file_type = path[3] # file, image, flash or media (as
# defined by CKEditor)
if file_type in ['file', 'image', 'flash', 'media']:
file_name = '/'.join(path[4:]) # the filename
def answer_get(req, form):
"""Accessing files attached to comments."""
form['file'] = file_name
form['type'] = file_type
form['uid'] = uid
form['recid'] = recid
form['bskid'] = bskid
return self._get(req, form)
return answer_get, []
elif component == 'put' and len(path) > 1:
bskid = path[0] # Basket id
recid = path[1] # Record id
def answer_put(req, form):
"""Attaching file to a comment."""
form['recid'] = recid
form['bskid'] = bskid
return self._put(req, form)
return answer_put, []
# All other cases: file not found
return None, []
def _get(self, req, form):
"""
Returns a file attached to a comment.
A file is attached to a comment of a record of a basket, by a
user (who is the author of the comment), and is of a certain
type (file, image, etc). Therefore these 5 values are part of
the URL. Eg:
CFG_SITE_SECURE_URL/yourbaskets/attachments/get/31/91/5/file/myfile.pdf
bskid/recid/uid
"""
argd = wash_urlargd(form, {'file': (str, None),
'type': (str, None),
'uid': (int, 0),
'bskid': (int, 0),
'recid': (int, 0)})
_ = gettext_set_language(argd['ln'])
# Can user view this basket & record & comment, i.e. can user
# access its attachments?
#uid = getUid(req)
user_info = collect_user_info(req)
rights = get_max_user_rights_on_basket(argd['uid'], argd['bskid'])
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
if user_info['email'] == 'guest':
# Ask to login
target = CFG_SITE_SECURE_URL + '/youraccount/login' + \
make_canonical_urlargd({'ln' : argd['ln'], 'referer' : \
CFG_SITE_SECURE_URL + user_info['uri']}, {})
return redirect_to_url(req, target)
elif not(check_sufficient_rights(rights, cfg['CFG_WEBBASKET_SHARE_LEVELS']['READITM'])):
return page_not_authorized(req, "../", \
text = _("You are not authorized to view this attachment"))
if not argd['file'] is None:
# Prepare path to file on disk. Normalize the path so that
# ../ and other dangerous components are removed.
path = os.path.abspath(CFG_PREFIX + '/var/data/baskets/comments/' + \
str(argd['bskid']) + '/' + str(argd['recid']) + '/' + \
str(argd['uid']) + '/' + argd['type'] + '/' + \
argd['file'])
# Check that we are really accessing attachements
# directory, for the declared basket and record.
if path.startswith(CFG_PREFIX + '/var/data/baskets/comments/' + \
str(argd['bskid']) + '/' + str(argd['recid'])) and \
os.path.exists(path):
return stream_file(req, path)
# Send error 404 in all other cases
return apache.HTTP_NOT_FOUND
def _put(self, req, form):
"""
Process requests received from CKEditor to upload files, etc.
URL eg:
CFG_SITE_SECURE_URL/yourbaskets/attachments/put/31/91/
bskid/recid/
"""
if not is_html_text_editor_installed():
return
argd = wash_urlargd(form, {'bskid': (int, 0),
'recid': (int, 0)})
uid = getUid(req)
# URL where the file can be fetched after upload
user_files_path = '%(CFG_SITE_SECURE_URL)s/yourbaskets/attachments/get/%(bskid)s/%(recid)i/%(uid)s' % \
{'uid': uid,
'recid': argd['recid'],
'bskid': argd['bskid'],
'CFG_SITE_SECURE_URL': CFG_SITE_SECURE_URL}
# Path to directory where uploaded files are saved
user_files_absolute_path = '%(CFG_PREFIX)s/var/data/baskets/comments/%(bskid)s/%(recid)s/%(uid)s' % \
{'uid': uid,
'recid': argd['recid'],
'bskid': argd['bskid'],
'CFG_PREFIX': CFG_PREFIX}
# Check that user can
# 1. is logged in
# 2. comment records of this basket (to simplify, we use
# WebComment function to check this, even if it is not
# entirely adequate)
# 3. attach files
user_info = collect_user_info(req)
(auth_code, dummy) = check_user_can_attach_file_to_comments(user_info, argd['recid'])
fileurl = ''
callback_function = ''
        if user_info['email'] == 'guest':
            # 1. User is guest: must login prior to upload
            msg = 'Please login before uploading file.'
        elif not user_info['precached_usebaskets']:
msg = 'Sorry, you are not allowed to use WebBasket'
elif not check_user_can_comment(uid, argd['bskid']):
# 2. User cannot edit comment of this basket
msg = 'Sorry, you are not allowed to submit files'
elif auth_code:
# 3. User cannot submit
msg = 'Sorry, you are not allowed to submit files.'
else:
# Process the upload and get the response
(msg, uploaded_file_path, filename, fileurl, callback_function) = \
process_CKEditor_upload(form, uid, user_files_path, user_files_absolute_path,
recid=argd['recid'])
send_response(req, msg, fileurl, callback_function)
class WebInterfaceYourBasketsPages(WebInterfaceDirectory):
"""Defines the set of /yourbaskets pages."""
_exports = ['',
'display_item',
'display',
'search',
'write_note',
'save_note',
'delete_note',
'add',
'delete',
'modify',
'edit',
'edit_topic',
'create_basket',
'display_public',
'list_public_baskets',
'subscribe',
'unsubscribe',
'write_public_note',
'save_public_note',
'attachments']
attachments = WebInterfaceBasketCommentsFiles()
def index(self, req, dummy):
"""Index page."""
redirect_to_url(req, '%s/yourbaskets/display?%s' % (CFG_SITE_SECURE_URL, req.args))
def display_item(self, req, dummy):
"""Legacy URL redirection."""
redirect_to_url(req, '%s/yourbaskets/display?%s' % (CFG_SITE_SECURE_URL, req.args))
def display(self, req, form):
"""Display basket interface."""
argd = wash_urlargd(form, {'category':
(str, cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE']),
'topic': (str, ""),
'group': (int, 0),
'bskid': (int, 0),
'recid': (int, 0),
'bsk_to_sort': (int, 0),
'sort_by_title': (str, ""),
'sort_by_date': (str, ""),
'of': (str, "hb"),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/display",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/display%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
(body, dummy, navtrail) = perform_request_display(uid=uid,
selected_category=argd['category'],
selected_topic=argd['topic'],
selected_group_id=argd['group'],
selected_bskid=argd['bskid'],
selected_recid=argd['recid'],
of=argd['of'],
ln=argd['ln'])
if isGuestUser(uid):
body = create_guest_warning_box(argd['ln']) + body
# register event in webstat
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["display", "", user_str])
except:
register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
rssurl = CFG_SITE_SECURE_URL + "/rss"
if argd['of'] != 'hb':
page_start(req, of=argd['of'])
if argd['of'].startswith('x'):
req.write(body)
page_end(req, of=argd['of'])
return
elif argd['bskid']:
rssurl = "%s/yourbaskets/display?category=%s&topic=%s&group=%i&bskid=%i&of=xr" % \
(CFG_SITE_SECURE_URL,
argd['category'],
urllib.quote(argd['topic']),
argd['group'],
argd['bskid'])
return page(title = _("Display baskets"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1,
rssurl=rssurl)
def search(self, req, form):
"""Search baskets interface."""
argd = wash_urlargd(form, {'category': (str, ""),
'topic': (str, ""),
'group': (int, 0),
'p': (str, ""),
'b': (str, ""),
'n': (int, 0),
'of': (str, "hb"),
'verbose': (int, 0),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/search",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/search%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
(body, navtrail) = perform_request_search(uid=uid,
selected_category=argd['category'],
selected_topic=argd['topic'],
selected_group_id=argd['group'],
p=argd['p'],
b=argd['b'],
n=argd['n'],
# format=argd['of'],
ln=argd['ln'])
# register event in webstat
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["search", "", user_str])
except:
register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
return page(title = _("Search baskets"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
def write_note(self, req, form):
"""Write a comment (just interface for writing)"""
argd = wash_urlargd(form, {'category': (str, cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE']),
'topic': (str, ""),
'group': (int, 0),
'bskid': (int, 0),
'recid': (int, 0),
'cmtid': (int, 0),
'of' : (str, ''),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/write_note",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/write_note%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
(body, navtrail) = perform_request_write_note(uid=uid,
category=argd['category'],
topic=argd['topic'],
group_id=argd['group'],
bskid=argd['bskid'],
recid=argd['recid'],
cmtid=argd['cmtid'],
ln=argd['ln'])
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["write_note", basket_str, user_str])
except:
register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
return page(title = _("Add a note"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
secure_page_p=1)
def save_note(self, req, form):
"""Save comment on record in basket"""
argd = wash_urlargd(form, {'category': (str, cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE']),
'topic': (str, ""),
'group': (int, 0),
'bskid': (int, 0),
'recid': (int, 0),
'note_title': (str, ""),
'note_body': (str, ""),
'date_creation': (str, ""),
'editor_type': (str, ""),
'of': (str, ''),
'ln': (str, CFG_SITE_LANG),
'reply_to': (int, 0)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/save_note",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/save_note%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
(body, navtrail) = perform_request_save_note(uid=uid,
category=argd['category'],
topic=argd['topic'],
group_id=argd['group'],
bskid=argd['bskid'],
recid=argd['recid'],
note_title=argd['note_title'],
note_body=argd['note_body'],
date_creation=argd['date_creation'],
editor_type=argd['editor_type'],
ln=argd['ln'],
reply_to=argd['reply_to'])
        # TODO: do not register the event if the save was not successful
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["save_note", basket_str, user_str])
except:
register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
return page(title = _("Display item and notes"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
def delete_note(self, req, form):
"""Delete a comment
@param bskid: id of basket (int)
@param recid: id of record (int)
@param cmtid: id of comment (int)
@param category: category (see webbasket_config) (str)
        @param topic: name of the topic currently displayed (str)
        @param group: id of the group whose baskets are currently displayed (int)
@param ln: language"""
argd = wash_urlargd(form, {'category': (str, cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE']),
'topic': (str, ""),
'group': (int, 0),
'bskid': (int, 0),
'recid': (int, 0),
'cmtid': (int, 0),
'of' : (str, ''),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/delete_note",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/delete_note%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/display%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
(body, navtrail) = perform_request_delete_note(uid=uid,
category=argd['category'],
topic=argd['topic'],
group_id=argd['group'],
bskid=argd['bskid'],
recid=argd['recid'],
cmtid=argd['cmtid'],
ln=argd['ln'])
        # TODO: do not register the event if the delete was not successful
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
user_info = collect_user_info(req)
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["delete_note", basket_str, user_str])
except:
register_exception(suffix="Do the webstat tables exists? Try with 'webstatadmin --load-config'")
return page(title = _("Display item and notes"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
def add(self, req, form):
"""Add records to baskets.
@param recid: list of records to add
@param colid: in case of external collections, the id of the collection the records belong to
@param bskids: list of baskets to add records to. if not provided,
will return a page where user can select baskets
@param referer: URL of the referring page
@param new_basket_name: add record to new basket
@param new_topic_name: new basket goes into new topic
@param create_in_topic: # of topic to put basket into
@param ln: language"""
# TODO: apply a maximum limit of items (100) that can be added to a basket
# at once. Also see the build_search_url function of websearch_..._searcher.py
# for the "rg" GET variable.
argd = wash_urlargd(form, {'recid': (list, []),
'category': (str, ""),
'bskid': (int, 0),
'colid': (int, 0),
'es_title': (str, ""),
'es_desc': (str, ""),
'es_url': (str, ""),
'note_body': (str, ""),
'date_creation': (str, ""),
'editor_type': (str, ""),
'b': (str, ""),
'copy': (int, 0),
'move_from_basket': (int, 0),
'wait': (int, 0),
'referer': (str, ""),
'of': (str, ''),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/add",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/add%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
if not argd['referer']:
argd['referer'] = get_referer(req)
(body, navtrail) = perform_request_add(uid=uid,
recids=argd['recid'],
colid=argd['colid'],
bskid=argd['bskid'],
es_title=argd['es_title'],
es_desc=argd['es_desc'],
es_url=argd['es_url'],
note_body=argd['note_body'],
date_creation=argd['date_creation'],
editor_type=argd['editor_type'],
category=argd['category'],
b=argd['b'],
copy=argd['copy'],
move_from_basket=argd['move_from_basket'],
wait=argd['wait'],
referer=argd['referer'],
ln=argd['ln'])
if isGuestUser(uid):
body = create_guest_warning_box(argd['ln']) + body
# register event in webstat
bskid = argd['bskid']
basket_str = "%s (%s)" % (get_basket_name(bskid), bskid)
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["add", basket_str, user_str])
except:
            register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
return page(title = _('Add to basket'),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
def delete(self, req, form):
"""Delete basket interface"""
argd = wash_urlargd(form, {'bskid' : (int, -1),
'confirmed' : (int, 0),
'category' : (str, cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE']),
'topic' : (str, ""),
'group' : (int, 0),
'of' : (str, ''),
'ln' : (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/delete",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/delete%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
body=perform_request_delete(uid=uid,
bskid=argd['bskid'],
confirmed=argd['confirmed'],
category=argd['category'],
selected_topic=argd['topic'],
selected_group_id=argd['group'],
ln=argd['ln'])
if argd['confirmed']:
if argd['category'] == cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE']:
argd['topic'] = wash_topic(uid, argd['topic'])[0]
elif argd['category'] == cfg['CFG_WEBBASKET_CATEGORIES']['GROUP']:
argd['group'] = wash_group(uid, argd['group'])[0]
url = """%s/yourbaskets/display?category=%s&topic=%s&group=%i&ln=%s""" % \
(CFG_SITE_SECURE_URL,
argd['category'],
urllib.quote(argd['topic']),
argd['group'],
argd['ln'])
redirect_to_url(req, url)
else:
navtrail = '<a class="navtrail" href="%s/youraccount/display?ln=%s">'\
'%s</a>'
navtrail %= (CFG_SITE_SECURE_URL, argd['ln'], _("Your Account"))
navtrail_end = create_basket_navtrail(uid=uid,
category=argd['category'],
topic=argd['topic'],
group=argd['group'],
bskid=argd['bskid'],
ln=argd['ln'])
if isGuestUser(uid):
body = create_guest_warning_box(argd['ln']) + body
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["delete", basket_str, user_str])
except:
            register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
return page(title = _("Delete a basket"),
body = body,
navtrail = navtrail + navtrail_end,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
secure_page_p=1)
def modify(self, req, form):
"""Modify basket content interface (reorder, suppress record, etc.)"""
argd = wash_urlargd(form, {'action': (str, ""),
'bskid': (int, -1),
'recid': (int, 0),
'category': (str, cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE']),
'topic': (str, ""),
'group': (int, 0),
'of' : (str, ''),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/modify",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/modify%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
url = CFG_SITE_SECURE_URL
url += '/yourbaskets/display?category=%s&topic=%s&group=%i&bskid=%i&ln=%s' % \
(argd['category'], urllib.quote(argd['topic']), argd['group'], argd['bskid'], argd['ln'])
if argd['action'] == cfg['CFG_WEBBASKET_ACTIONS']['DELETE']:
delete_record(uid, argd['bskid'], argd['recid'])
redirect_to_url(req, url)
elif argd['action'] == cfg['CFG_WEBBASKET_ACTIONS']['UP']:
move_record(uid, argd['bskid'], argd['recid'], argd['action'])
redirect_to_url(req, url)
elif argd['action'] == cfg['CFG_WEBBASKET_ACTIONS']['DOWN']:
move_record(uid, argd['bskid'], argd['recid'], argd['action'])
redirect_to_url(req, url)
elif argd['action'] == cfg['CFG_WEBBASKET_ACTIONS']['COPY'] or \
argd['action'] == cfg['CFG_WEBBASKET_ACTIONS']['MOVE']:
            if argd['action'] == cfg['CFG_WEBBASKET_ACTIONS']['MOVE']:
title = _("Move record to basket")
from_bsk = argd['bskid']
else:
title = _("Copy record to basket")
from_bsk = 0
referer = get_referer(req)
(body, navtrail) = perform_request_add(uid=uid,
recids=argd['recid'],
copy=True,
move_from_basket=from_bsk,
referer=referer,
ln=argd['ln'])
if isGuestUser(uid):
body = create_guest_warning_box(argd['ln']) + body
else:
title = ''
body = ''
# warnings = [('WRN_WEBBASKET_UNDEFINED_ACTION',)]
navtrail = '<a class="navtrail" href="%s/youraccount/display?ln=%s">'\
'%s</a>'
navtrail %= (CFG_SITE_SECURE_URL, argd['ln'], _("Your Account"))
navtrail_end = create_basket_navtrail(uid=uid,
category=argd['category'],
topic=argd['topic'],
group=argd['group'],
bskid=argd['bskid'],
ln=argd['ln'])
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["modify", basket_str, user_str])
except:
            register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
return page(title = title,
body = body,
navtrail = navtrail + navtrail_end,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
secure_page_p=1)
def edit(self, req, form):
"""Edit basket interface"""
argd = wash_urlargd(form, {'bskid': (int, 0),
'groups': (list, []),
'topic': (str, ""),
'add_group': (str, ""),
'group_cancel': (str, ""),
'submit': (str, ""),
'cancel': (str, ""),
'delete': (str, ""),
'new_name': (str, ""),
'new_topic': (str, ""),
'new_topic_name': (str, ""),
'new_group': (str, ""),
'external': (str, ""),
'of' : (str, ''),
'ln': (str, CFG_SITE_LANG)})
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/edit",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/edit%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
_ = gettext_set_language(argd['ln'])
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
if argd['cancel']:
url = CFG_SITE_SECURE_URL + '/yourbaskets/display?category=%s&topic=%s&ln=%s'
url %= (cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
urllib.quote(argd['topic']),
argd['ln'])
redirect_to_url(req, url)
elif argd['delete']:
url = CFG_SITE_SECURE_URL
url += '/yourbaskets/delete?bskid=%i&category=%s&topic=%s&ln=%s' % \
(argd['bskid'],
cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
urllib.quote(argd['topic']),
argd['ln'])
redirect_to_url(req, url)
elif argd['add_group'] and not(argd['new_group']):
body = perform_request_add_group(uid=uid,
bskid=argd['bskid'],
topic=argd['topic'],
ln=argd['ln'])
# warnings = []
elif (argd['add_group'] and argd['new_group']) or argd['group_cancel']:
if argd['add_group']:
perform_request_add_group(uid=uid,
bskid=argd['bskid'],
topic=argd['topic'],
group_id=argd['new_group'],
ln=argd['ln'])
body = perform_request_edit(uid=uid,
bskid=argd['bskid'],
topic=argd['topic'],
ln=argd['ln'])
elif argd['submit']:
body = perform_request_edit(uid=uid,
bskid=argd['bskid'],
topic=argd['topic'],
new_name=argd['new_name'],
new_topic=argd['new_topic'],
new_topic_name=argd['new_topic_name'],
groups=argd['groups'],
external=argd['external'],
ln=argd['ln'])
if argd['new_topic'] != "-1":
argd['topic'] = argd['new_topic']
url = CFG_SITE_SECURE_URL + '/yourbaskets/display?category=%s&topic=%s&ln=%s' % \
(cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
urllib.quote(argd['topic']),
argd['ln'])
redirect_to_url(req, url)
else:
body = perform_request_edit(uid=uid,
bskid=argd['bskid'],
topic=argd['topic'],
ln=argd['ln'])
navtrail = '<a class="navtrail" href="%s/youraccount/display?ln=%s">'\
'%s</a>'
navtrail %= (CFG_SITE_SECURE_URL, argd['ln'], _("Your Account"))
navtrail_end = create_basket_navtrail(
uid=uid,
category=cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
topic=argd['topic'],
group=0,
bskid=argd['bskid'],
ln=argd['ln'])
if isGuestUser(uid):
body = create_guest_warning_box(argd['ln']) + body
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["edit", basket_str, user_str])
except:
            register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
return page(title = _("Edit basket"),
body = body,
navtrail = navtrail + navtrail_end,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
secure_page_p=1)
def edit_topic(self, req, form):
"""Edit topic interface"""
argd = wash_urlargd(form, {'topic': (str, ""),
'submit': (str, ""),
'cancel': (str, ""),
'delete': (str, ""),
'new_name': (str, ""),
'of' : (str, ''),
'ln': (str, CFG_SITE_LANG)})
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/edit",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/edit_topic%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
_ = gettext_set_language(argd['ln'])
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
if argd['cancel']:
url = CFG_SITE_SECURE_URL + '/yourbaskets/display?category=%s&ln=%s'
url %= (cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'], argd['ln'])
redirect_to_url(req, url)
elif argd['delete']:
url = CFG_SITE_SECURE_URL
url += '/yourbaskets/delete?bskid=%i&category=%s&topic=%s&ln=%s' % \
(argd['bskid'],
cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
urllib.quote(argd['topic']),
argd['ln'])
redirect_to_url(req, url)
elif argd['submit']:
body = perform_request_edit_topic(uid=uid,
topic=argd['topic'],
new_name=argd['new_name'],
ln=argd['ln'])
url = CFG_SITE_SECURE_URL + '/yourbaskets/display?category=%s&ln=%s' % \
(cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'], argd['ln'])
redirect_to_url(req, url)
else:
body = perform_request_edit_topic(uid=uid,
topic=argd['topic'],
ln=argd['ln'])
navtrail = '<a class="navtrail" href="%s/youraccount/display?ln=%s">'\
'%s</a>'
navtrail %= (CFG_SITE_SECURE_URL, argd['ln'], _("Your Account"))
navtrail_end = ""
#navtrail_end = create_basket_navtrail(
# uid=uid,
# category=cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
# topic=argd['topic'],
# group=0,
# ln=argd['ln'])
if isGuestUser(uid):
body = create_guest_warning_box(argd['ln']) + body
# register event in webstat
#basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
#if user_info['email']:
# user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
#else:
# user_str = ""
#try:
# register_customevent("baskets", ["edit", basket_str, user_str])
#except:
        #    register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
return page(title = _("Edit topic"),
body = body,
navtrail = navtrail + navtrail_end,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
secure_page_p=1)
def create_basket(self, req, form):
"""Create basket interface"""
argd = wash_urlargd(form, {'new_basket_name': (str, ""),
'new_topic_name' : (str, ""),
'create_in_topic': (str, "-1"),
'topic' : (str, ""),
'recid' : (list, []),
'colid' : (int, -1),
'es_title' : (str, ''),
'es_desc' : (str, ''),
'es_url' : (str, ''),
'copy' : (int, 0),
'move_from_basket':(int, 0),
'referer' : (str, ''),
'of' : (str, ''),
'ln' : (str, CFG_SITE_LANG)})
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/create_basket",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/create_basket%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
_ = gettext_set_language(argd['ln'])
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
if argd['new_basket_name'] and \
(argd['new_topic_name'] or argd['create_in_topic'] != "-1"):
(bskid, topic) = perform_request_create_basket(
req,
uid=uid,
new_basket_name=argd['new_basket_name'],
new_topic_name=argd['new_topic_name'],
create_in_topic=argd['create_in_topic'],
recids=argd['recid'],
colid=argd['colid'],
es_title=argd['es_title'],
es_desc=argd['es_desc'],
es_url=argd['es_url'],
copy=argd['copy'],
move_from_basket=argd['move_from_basket'],
referer=argd['referer'],
ln=argd['ln'])
# register event in webstat
basket_str = "%s ()" % argd['new_basket_name']
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["create_basket", basket_str, user_str])
except:
                register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
            if argd['recid'] and argd['colid'] >= 0:
                url = CFG_SITE_SECURE_URL + '/yourbaskets/add?category=%s&copy=%i&referer=%s&bskid=%i&colid=%i&move_from_basket=%i&recid=%s&wait=1&ln=%s'
url %= (cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
argd['copy'],
urllib.quote(argd['referer']),
bskid,
argd['colid'],
argd['move_from_basket'],
'&recid='.join(str(recid) for recid in argd['recid']),
argd['ln'])
            elif argd['es_title'] and argd['es_desc'] and argd['es_url'] and argd['colid'] == -1:
# Adding NEW external record - this does not need 'move_from_basket' data
url = CFG_SITE_SECURE_URL + '/yourbaskets/add?category=%s&bskid=%i&colid=%i&es_title=%s&es_desc=%s&es_url=%s&wait=1&ln=%s'
url %= (cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
bskid,
argd['colid'],
urllib.quote(argd['es_title']),
urllib.quote(argd['es_desc']),
urllib.quote(argd['es_url']),
argd['ln'])
else:
url = CFG_SITE_SECURE_URL + '/yourbaskets/display?category=%s&topic=%s&ln=%s'
url %= (cfg['CFG_WEBBASKET_CATEGORIES']['PRIVATE'],
urllib.quote(topic),
argd['ln'])
redirect_to_url(req, url)
else:
body = perform_request_create_basket(req,
uid=uid,
new_basket_name=argd['new_basket_name'],
new_topic_name=argd['new_topic_name'],
create_in_topic=argd['create_in_topic'],
topic=argd['topic'],
recids=argd['recid'],
colid=argd['colid'],
es_title=argd['es_title'],
es_desc=argd['es_desc'],
es_url=argd['es_url'],
copy=argd['copy'],
move_from_basket=argd['move_from_basket'],
referer=argd['referer'],
ln=argd['ln'])
navtrail = '<a class="navtrail" href="%s/youraccount/'\
'display?ln=%s">%s</a>'
navtrail %= (CFG_SITE_SECURE_URL, argd['ln'], _("Your Account"))
if isGuestUser(uid):
body = create_guest_warning_box(argd['ln']) + body
return page(title = _("Create basket"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
secure_page_p=1)
def display_public(self, req, form):
"""Display a public basket"""
argd = wash_urlargd(form, {'bskid': (int, 0),
'recid': (int, 0),
'of': (str, "hb"),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/display",
navmenuid = 'yourbaskets')
user_info = collect_user_info(req)
if not argd['bskid']:
(body, navtrail) = perform_request_list_public_baskets(uid)
title = _('List of public baskets')
# register event in webstat
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["list_public_baskets", "", user_str])
except:
                register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
else:
(body, dummy, navtrail) = perform_request_display_public(uid=uid,
selected_bskid=argd['bskid'],
selected_recid=argd['recid'],
of=argd['of'],
ln=argd['ln'])
title = _('Public basket')
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["display_public", basket_str, user_str])
except:
                register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
rssurl = CFG_SITE_SECURE_URL + "/rss"
if argd['of'] != 'hb':
page_start(req, of=argd['of'])
if argd['of'].startswith('x'):
req.write(body)
page_end(req, of=argd['of'])
return
elif argd['bskid']:
rssurl = "%s/yourbaskets/display_public?&bskid=%i&of=xr" % \
(CFG_SITE_SECURE_URL,
argd['bskid'])
return page(title = title,
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1,
rssurl=rssurl)
def list_public_baskets(self, req, form):
"""List of public baskets interface."""
argd = wash_urlargd(form, {'limit': (int, 1),
'sort': (str, 'name'),
'asc': (int, 1),
'of': (str, ''),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
return page_not_authorized(req, "../yourbaskets/list_public_baskets",
navmenuid = 'yourbaskets')
user_info = collect_user_info(req)
nb_views_show = acc_authorize_action(user_info, 'runwebstatadmin')
        nb_views_show_p = not nb_views_show[0]
(body, navtrail) = perform_request_list_public_baskets(uid,
argd['limit'],
argd['sort'],
argd['asc'],
nb_views_show_p,
argd['ln'])
return page(title = _("List of public baskets"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
def subscribe(self, req, form):
"""Subscribe to a basket pseudo-interface."""
argd = wash_urlargd(form, {'bskid': (int, 0),
'of': (str, 'hb'),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
return page_not_authorized(req, "../yourbaskets/subscribe",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/subscribe%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
if not argd['bskid']:
(body, navtrail) = perform_request_list_public_baskets(uid)
title = _('List of public baskets')
else:
# TODO: Take care of XML output as shown below
#req.content_type = "text/xml"
#req.send_http_header()
#return perform_request_display_public(bskid=argd['bskid'], of=argd['of'], ln=argd['ln'])
subscribe_warnings_html = perform_request_subscribe(uid, argd['bskid'], argd['ln'])
(body, dummy, navtrail) = perform_request_display_public(uid=uid,
selected_bskid=argd['bskid'],
selected_recid=0,
of=argd['of'],
ln=argd['ln'])
#warnings.extend(subscribe_warnings)
body = subscribe_warnings_html + body
title = _('Public basket')
return page(title = title,
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
def unsubscribe(self, req, form):
"""Unsubscribe from basket pseudo-interface."""
argd = wash_urlargd(form, {'bskid': (int, 0),
'of': (str, 'hb'),
'ln': (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE == 2:
return page_not_authorized(req, "../yourbaskets/unsubscribe",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/unsubscribe%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
if not argd['bskid']:
(body, navtrail) = perform_request_list_public_baskets(uid)
title = _('List of public baskets')
else:
# TODO: Take care of XML output as shown below
#req.content_type = "text/xml"
#req.send_http_header()
#return perform_request_display_public(bskid=argd['bskid'], of=argd['of'], ln=argd['ln'])
unsubscribe_warnings_html = perform_request_unsubscribe(uid, argd['bskid'], argd['ln'])
(body, dummy, navtrail) = perform_request_display_public(uid=uid,
selected_bskid=argd['bskid'],
selected_recid=0,
of=argd['of'],
ln=argd['ln'])
# warnings.extend(unsubscribe_warnings)
body = unsubscribe_warnings_html + body
title = _('Public basket')
return page(title = title,
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
def write_public_note(self, req, form):
"""Write a comment (just interface for writing)"""
argd = wash_urlargd(form, {'bskid': (int, 0),
'recid': (int, 0),
'cmtid': (int, 0),
'of' : (str, ''),
'ln' : (str, CFG_SITE_LANG)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/write_public_note",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/write_public_note%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
(body, navtrail) = perform_request_write_public_note(uid=uid,
bskid=argd['bskid'],
recid=argd['recid'],
cmtid=argd['cmtid'],
ln=argd['ln'])
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["write_public_note", basket_str, user_str])
except:
            register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
return page(title = _("Add a note"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
secure_page_p=1)
def save_public_note(self, req, form):
"""Save comment on record in basket"""
argd = wash_urlargd(form, {'bskid': (int, 0),
'recid': (int, 0),
'note_title': (str, ""),
'note_body': (str, ""),
'editor_type': (str, ""),
'of': (str, ''),
'ln': (str, CFG_SITE_LANG),
'reply_to': (str, 0)})
_ = gettext_set_language(argd['ln'])
uid = getUid(req)
if uid == -1 or CFG_ACCESS_CONTROL_LEVEL_SITE >= 1:
return page_not_authorized(req, "../yourbaskets/save_public_note",
navmenuid = 'yourbaskets')
if isGuestUser(uid):
return redirect_to_url(req, "%s/youraccount/login%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd({
'referer' : "%s/yourbaskets/save_public_note%s" % (
CFG_SITE_SECURE_URL,
make_canonical_urlargd(argd, {})),
"ln" : argd['ln']}, {})))
user_info = collect_user_info(req)
if not user_info['precached_usebaskets']:
return page_not_authorized(req, "../", \
text = _("You are not authorized to use baskets."))
(body, navtrail) = perform_request_save_public_note(uid=uid,
bskid=argd['bskid'],
recid=argd['recid'],
note_title=argd['note_title'],
note_body=argd['note_body'],
editor_type=argd['editor_type'],
ln=argd['ln'],
reply_to=argd['reply_to'])
        # TODO: do not register the stat event if the save was not successful
# register event in webstat
basket_str = "%s (%d)" % (get_basket_name(argd['bskid']), argd['bskid'])
if user_info['email']:
user_str = "%s (%d)" % (user_info['email'], user_info['uid'])
else:
user_str = ""
try:
register_customevent("baskets", ["save_public_note", basket_str, user_str])
except:
            register_exception(suffix="Do the webstat tables exist? Try with 'webstatadmin --load-config'")
return page(title = _("Display item and notes"),
body = body,
navtrail = navtrail,
uid = uid,
lastupdated = __lastupdated__,
language = argd['ln'],
req = req,
navmenuid = 'yourbaskets',
of = argd['of'],
navtrail_append_title_p = 0,
secure_page_p=1)
| gpl-2.0 |
gautam1858/tensorflow | tensorflow/contrib/receptive_field/receptive_field_api.py | 48 | 1204 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module that declares the functions in tf.contrib.receptive_field's API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.contrib.receptive_field.python.util.graph_compute_order import get_compute_order
from tensorflow.contrib.receptive_field.python.util.receptive_field import compute_receptive_field_from_graph_def
# pylint: enable=unused-import
del absolute_import
del division
del print_function
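# Minimal usage sketch (the node names below are hypothetical, and the exact
# shape of the returned value varies across TF versions -- treat this as an
# illustration, not a contract):
#
#   from tensorflow.contrib import receptive_field
#   graph_def = my_graph.as_graph_def()  # my_graph: a previously built tf.Graph
#   rf = receptive_field.compute_receptive_field_from_graph_def(
#       graph_def, 'input_image', 'my_output_endpoint')
#   # rf describes the receptive field size, effective stride and effective
#   # padding of 'my_output_endpoint' with respect to 'input_image'.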
| apache-2.0 |
mhrivnak/pulp | server/pulp/server/db/model/criteria.py | 2 | 13940 | from types import NoneType
import copy
import re
import sys
import pymongo
from pulp.server import exceptions as pulp_exceptions
from pulp.server.db.model.base import Model
class Criteria(Model):
def __init__(self, filters=None, sort=None, limit=None, skip=None, fields=None):
super(Criteria, self).__init__()
assert isinstance(filters, (dict, NoneType))
assert isinstance(sort, (list, tuple, NoneType))
assert isinstance(limit, (int, NoneType))
assert isinstance(skip, (int, NoneType))
assert isinstance(fields, (list, tuple, NoneType))
self.filters = filters
self.sort = sort
self.limit = limit
self.skip = skip
self.fields = fields
def as_dict(self):
"""
@return: the Criteria as a dict, suitable for serialization by
something like JSON, and compatible as input to the
from_dict method.
@rtype: dict
"""
return {
'filters': self.filters,
'sort': self.sort,
'limit': self.limit,
'skip': self.skip,
'fields': self.fields
}
@classmethod
def from_client_input(cls, doc):
"""
Accept input provided by a client (such as through a GET or POST
request), validate that the provided data is part of a Criteria
definition, and ensure that no additional data is present.
@param doc: a dict including only data that corresponds to attributes
of a Criteria object
@type doc: dict
@return: new Criteria instance based on provided data
@rtype: pulp.server.db.model.criteria.Criteria
"""
if not isinstance(doc, dict):
            raise pulp_exceptions.InvalidValue(['criteria'])
doc = copy.copy(doc)
filters = _validate_filters(doc.pop('filters', None))
sort = _validate_sort(doc.pop('sort', None))
limit = _validate_limit(doc.pop('limit', None))
skip = _validate_skip(doc.pop('skip', None))
fields = _validate_fields(doc.pop('fields', None))
if doc:
raise pulp_exceptions.InvalidValue(doc.keys())
return cls(filters, sort, limit, skip, fields)
@classmethod
def from_dict(cls, input_dictionary):
"""
Convert a dictionary representation of the Criteria into a new Criteria object. The output
of as_dict() is suitable as input to this method.
:param input_dictionary: The dictionary representation of a Criteria object that will be
used to construct one.
:type input_dictionary: dict
:return: A new Criteria object
:rtype: Criteria
"""
return cls(input_dictionary['filters'], input_dictionary['sort'], input_dictionary['limit'],
input_dictionary['skip'], input_dictionary['fields'])
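    # Round-trip sketch (illustrative): the output of as_dict() feeds straight
    # back into from_dict():
    #   crit = Criteria(filters={'id': 'foo'}, limit=10)
    #   same = Criteria.from_dict(crit.as_dict())
    #   assert same.limit == 10 and same.filters == {'id': 'foo'}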
@property
def spec(self):
if self.filters is None:
return None
spec = copy.copy(self.filters)
_compile_regexs_for_not(spec)
return spec
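# Usage sketch (illustrative): client input is validated field by field, and
# the spec property compiles string '$not' operators into regexes so the
# filters can be handed to pymongo:
#   crit = Criteria.from_client_input({'filters': {'id': {'$not': 'foo.*'}},
#                                      'limit': 5})
#   crit.spec  # -> {'id': {'$not': re.compile('foo.*')}}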
class UnitAssociationCriteria(Model):
# Shadowed here for convenience
SORT_ASCENDING = pymongo.ASCENDING
SORT_DESCENDING = pymongo.DESCENDING
def __init__(self, type_ids=None, association_filters=None, unit_filters=None,
association_sort=None, unit_sort=None, limit=None, skip=None,
association_fields=None, unit_fields=None, remove_duplicates=False):
"""
There are a number of entry points into creating one of these instances:
multiple REST interfaces, the plugins, etc. As such, this constructor
does quite a bit of validation on the parameter values.
@param type_ids: list of types to search
@type type_ids: [str]
@param association_filters: mongo spec describing search parameters on
association metadata
@type association_filters: dict
@param unit_filters: mongo spec describing search parameters on unit
metadata; only used when a single type ID is specified
@type unit_filters: dict
@param association_sort: ordered list of fields and directions; may only
contain association metadata
@type association_sort: [(str, <SORT_* constant>)]
@param unit_sort: ordered list of fields and directions; only used when
a single type ID is specified
@type unit_sort: [(str, <SORT_* constant>)]
@param limit: maximum number of results to return
@type limit: int
@param skip: number of results to skip
@type skip: int
@param association_fields: if specified, only the given fields from the
association's metadata will be included in returned units
@type association_fields: list of str
@param unit_fields: if specified, only the given fields from the unit's
metadata are returned; only applies when a single type ID is
specified
@type unit_fields: list of str
@param remove_duplicates: if True, units with multiple associations will
only return a single association; defaults to False
@type remove_duplicates: bool
"""
super(UnitAssociationCriteria, self).__init__()
# A default instance will be used in the case where no criteria is
# passed in, so use sane defaults here.
if type_ids is not None and not isinstance(type_ids, (list, tuple)):
type_ids = [type_ids]
self.type_ids = type_ids
self.association_filters = association_filters or {}
self.unit_filters = unit_filters or {}
self.association_sort = association_sort
self.unit_sort = unit_sort
self.limit = limit
self.skip = skip
# The unit_id and unit_type_id are required as association returned data;
# frankly it doesn't make sense without them but it's also a technical
# requirement for the algorithm to run. Make sure they are there.
if association_fields is not None:
if 'unit_id' not in association_fields:
association_fields.append('unit_id')
if 'unit_type_id' not in association_fields:
association_fields.append('unit_type_id')
self.association_fields = association_fields
self.unit_fields = unit_fields
self.remove_duplicates = remove_duplicates
@classmethod
def from_client_input(cls, query):
"""
Parses a unit association query document and assembles a corresponding
internal criteria object.
Example:
{
"type_ids" : ["rpm"],
"filters" : {
"unit" : <mongo spec syntax>,
"association" : <mongo spec syntax>
},
"sort" : {
"unit" : [ ["name", "ascending"], ["version", "descending"] ],
"association" : [ ["created", "descending"] ]
},
"limit" : 100,
"skip" : 200,
"fields" : {
"unit" : ["name", "version", "arch"],
"association" : ["created"]
},
"remove_duplicates" : True
}
@param query: user-provided query details
@type query: dict
@return: criteria object for the unit association query
@rtype: L{UnitAssociationCriteria}
        @raises InvalidValue: on an invalid value in the query
"""
query = copy.copy(query)
type_ids = query.pop('type_ids', None)
filters = query.pop('filters', None)
if filters is None:
association_filters = None
unit_filters = None
else:
association_filters = _validate_filters(filters.pop('association', None))
unit_filters = _validate_filters(filters.pop('unit', None))
sort = query.pop('sort', None)
if sort is None:
association_sort = None
unit_sort = None
else:
association_sort = _validate_sort(sort.pop('association', None))
unit_sort = _validate_sort(sort.pop('unit', None))
limit = _validate_limit(query.pop('limit', None))
skip = _validate_skip(query.pop('skip', None))
fields = query.pop('fields', None)
if fields is None:
association_fields = None
unit_fields = None
else:
association_fields = _validate_fields(fields.pop('association', None))
unit_fields = _validate_fields(fields.pop('unit', None))
remove_duplicates = bool(query.pop('remove_duplicates', False))
# report any superfluous doc key, value pairs as errors
for d in (query, filters, sort, fields):
if d:
raise pulp_exceptions.InvalidValue(d.keys())
# These are here for backward compatibility, in the future, these
# should be removed and the corresponding association_spec and unit_spec
# properties should be used
if association_filters:
_compile_regexs_for_not(association_filters)
if unit_filters:
_compile_regexs_for_not(unit_filters)
return cls(type_ids=type_ids, association_filters=association_filters,
unit_filters=unit_filters, association_sort=association_sort,
unit_sort=unit_sort, limit=limit, skip=skip,
association_fields=association_fields, unit_fields=unit_fields,
remove_duplicates=remove_duplicates)
@property
def association_spec(self):
if self.association_filters is None:
return None
association_spec = copy.copy(self.association_filters)
_compile_regexs_for_not(association_spec)
return association_spec
@property
def unit_spec(self):
if self.unit_filters is None:
return None
unit_spec = copy.copy(self.unit_filters)
_compile_regexs_for_not(unit_spec)
return unit_spec
def __str__(self):
s = ''
if self.type_ids:
s += 'Type IDs [%s] ' % self.type_ids
if self.association_filters:
s += 'Assoc Filters [%s] ' % self.association_filters
if self.unit_filters is not None:
s += 'Unit Filters [%s] ' % self.unit_filters
if self.association_sort is not None:
s += 'Assoc Sort [%s] ' % self.association_sort
if self.unit_sort is not None:
s += 'Unit Sort [%s] ' % self.unit_sort
if self.limit:
s += 'Limit [%s] ' % self.limit
if self.skip:
s += 'Skip [%s] ' % self.skip
if self.association_fields:
s += 'Assoc Fields [%s] ' % self.association_fields
if self.unit_fields:
s += 'Unit Fields [%s] ' % self.unit_fields
s += 'Remove Duplicates [%s]' % self.remove_duplicates
return s
def _validate_filters(filters):
if filters is None:
return None
if not isinstance(filters, dict):
raise pulp_exceptions.InvalidValue(['filters'])
return filters
def _validate_sort(sort):
"""
@type sort: list, tuple
@rtype: tuple
"""
if sort is None:
return None
if not isinstance(sort, (list, tuple)):
        raise pulp_exceptions.InvalidValue(['sort'])
try:
valid_sort = []
for entry in sort:
if not isinstance(entry[0], basestring):
raise TypeError('Invalid field name [%s]' % str(entry[0]))
flag = str(entry[1]).lower()
direction = None
if flag in ('ascending', '1'):
direction = pymongo.ASCENDING
if flag in ('descending', '-1'):
direction = pymongo.DESCENDING
if direction is None:
raise ValueError('Invalid sort direction [%s]' % flag)
valid_sort.append((entry[0], direction))
except (TypeError, ValueError):
raise pulp_exceptions.InvalidValue(['sort']), None, sys.exc_info()[2]
else:
return valid_sort
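# Illustrative mapping performed by _validate_sort(): direction flags are
# normalized to pymongo constants, e.g.
#   [('name', 'ascending'), ('age', '-1')]  ->  [('name', 1), ('age', -1)]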
def _validate_limit(limit):
if isinstance(limit, bool):
        raise pulp_exceptions.InvalidValue(['limit'])
if limit is None:
return None
try:
limit = int(limit)
if limit < 1:
raise TypeError()
except (TypeError, ValueError):
raise pulp_exceptions.InvalidValue(['limit']), None, sys.exc_info()[2]
else:
return limit
def _validate_skip(skip):
if isinstance(skip, bool):
        raise pulp_exceptions.InvalidValue(['skip'])
if skip is None:
return None
try:
skip = int(skip)
if skip < 0:
raise TypeError()
except (TypeError, ValueError):
raise pulp_exceptions.InvalidValue(['skip']), None, sys.exc_info()[2]
else:
return skip
def _validate_fields(fields):
if fields is None:
return None
try:
if isinstance(fields, (basestring, dict)):
raise TypeError
fields = list(fields)
for f in fields:
if not isinstance(f, basestring):
raise TypeError()
except TypeError:
raise pulp_exceptions.InvalidValue(['fields']), None, sys.exc_info()[2]
return fields
def _compile_regexs_for_not(spec):
if not isinstance(spec, (dict, list, tuple)):
return
if isinstance(spec, (list, tuple)):
map(_compile_regexs_for_not, spec)
return
for key, value in spec.items():
if key == '$not' and isinstance(value, basestring):
spec[key] = re.compile(value)
_compile_regexs_for_not(value)
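# Example of the in-place rewrite performed above (illustrative):
#   spec = {'notes': {'$not': '.*draft.*'}}
#   _compile_regexs_for_not(spec)
#   # spec['notes']['$not'] is now re.compile('.*draft.*'), the form pymongo
#   # expects for $not with a pattern.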
| gpl-2.0 |
rafaeltomesouza/frontend-class1 | aula2/a13/linkedin/client/.gradle/yarn/node_modules/node-gyp/gyp/pylib/gyp/MSVSSettings.py | 1361 | 45045 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
r"""Code to validate and convert settings of the Microsoft build tools.
This file contains code to validate and convert settings of the Microsoft
build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(),
and ValidateMSBuildSettings() are the entry points.
This file was created by comparing the projects created by Visual Studio 2008
and Visual Studio 2010 for all available settings through the user interface.
The MSBuild schemas were also considered. They are typically found in the
MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
"""
import sys
import re
# Dictionaries of settings validators. The key is the tool name, the value is
# a dictionary mapping setting names to validation functions.
_msvs_validators = {}
_msbuild_validators = {}
# A dictionary of settings converters. The key is the tool name, the value is
# a dictionary mapping setting names to conversion functions.
_msvs_to_msbuild_converters = {}
# Tool name mapping from MSVS to MSBuild.
_msbuild_name_of_tool = {}
class _Tool(object):
"""Represents a tool used by MSVS or MSBuild.
Attributes:
msvs_name: The name of the tool in MSVS.
msbuild_name: The name of the tool in MSBuild.
"""
def __init__(self, msvs_name, msbuild_name):
self.msvs_name = msvs_name
self.msbuild_name = msbuild_name
def _AddTool(tool):
"""Adds a tool to the four dictionaries used to process settings.
This only defines the tool. Each setting also needs to be added.
Args:
tool: The _Tool object to be added.
"""
_msvs_validators[tool.msvs_name] = {}
_msbuild_validators[tool.msbuild_name] = {}
_msvs_to_msbuild_converters[tool.msvs_name] = {}
_msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
def _GetMSBuildToolSettings(msbuild_settings, tool):
"""Returns an MSBuild tool dictionary. Creates it if needed."""
return msbuild_settings.setdefault(tool.msbuild_name, {})
class _Type(object):
"""Type of settings (Base class)."""
def ValidateMSVS(self, value):
"""Verifies that the value is legal for MSVS.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSVS.
"""
def ValidateMSBuild(self, value):
"""Verifies that the value is legal for MSBuild.
Args:
value: the value to check for this type.
Raises:
ValueError if value is not valid for MSBuild.
"""
def ConvertToMSBuild(self, value):
"""Returns the MSBuild equivalent of the MSVS value given.
Args:
value: the MSVS value to convert.
Returns:
the MSBuild equivalent.
Raises:
ValueError if value is not valid.
"""
return value
class _String(_Type):
"""A setting that's just a string."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring):
raise ValueError('expected string; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
return ConvertVCMacrosToMSBuild(value)
class _StringList(_Type):
"""A settings that's a list of strings."""
def ValidateMSVS(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ValidateMSBuild(self, value):
if not isinstance(value, basestring) and not isinstance(value, list):
raise ValueError('expected string list; got %r' % value)
def ConvertToMSBuild(self, value):
# Convert the macros
if isinstance(value, list):
return [ConvertVCMacrosToMSBuild(i) for i in value]
else:
return ConvertVCMacrosToMSBuild(value)
class _Boolean(_Type):
"""Boolean settings, can have the values 'false' or 'true'."""
def _Validate(self, value):
if value != 'true' and value != 'false':
raise ValueError('expected bool; got %r' % value)
def ValidateMSVS(self, value):
self._Validate(value)
def ValidateMSBuild(self, value):
self._Validate(value)
def ConvertToMSBuild(self, value):
self._Validate(value)
return value
class _Integer(_Type):
"""Integer settings."""
def __init__(self, msbuild_base=10):
_Type.__init__(self)
self._msbuild_base = msbuild_base
def ValidateMSVS(self, value):
# Try to convert, this will raise ValueError if invalid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
# Try to convert, this will raise ValueError if invalid.
int(value, self._msbuild_base)
def ConvertToMSBuild(self, value):
msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x'
return msbuild_format % int(value)
class _Enumeration(_Type):
"""Type of settings that is an enumeration.
In MSVS, the values are indexes like '0', '1', and '2'.
MSBuild uses text labels that are more representative, like 'Win32'.
Constructor args:
label_list: an array of MSBuild labels that correspond to the MSVS index.
In the rare cases where MSVS has skipped an index value, None is
used in the array to indicate the unused spot.
new: an array of labels that are new to MSBuild.
"""
def __init__(self, label_list, new=None):
_Type.__init__(self)
self._label_list = label_list
self._msbuild_values = set(value for value in label_list
if value is not None)
if new is not None:
self._msbuild_values.update(new)
def ValidateMSVS(self, value):
# Try to convert. It will raise an exception if not valid.
self.ConvertToMSBuild(value)
def ValidateMSBuild(self, value):
if value not in self._msbuild_values:
raise ValueError('unrecognized enumerated value %s' % value)
def ConvertToMSBuild(self, value):
index = int(value)
if index < 0 or index >= len(self._label_list):
raise ValueError('index value (%d) not in expected range [0, %d)' %
(index, len(self._label_list)))
label = self._label_list[index]
if label is None:
raise ValueError('converted value for %s not specified.' % value)
return label
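# Illustrative example using a toy enumeration (not one of the real tables
# defined below):
#   e = _Enumeration(['Off', None, 'On'])
#   e.ConvertToMSBuild('2')  # -> 'On'
#   e.ConvertToMSBuild('1')  # raises ValueError: index 1 maps to None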
# Instantiate the various generic types.
_boolean = _Boolean()
_integer = _Integer()
# For now, we don't do any special validation on these types:
_string = _String()
_file_name = _String()
_folder_name = _String()
_file_list = _StringList()
_folder_list = _StringList()
_string_list = _StringList()
# Some boolean settings went from numerical values to boolean. The
# mapping is 0: default, 1: false, 2: true.
_newly_boolean = _Enumeration(['', 'false', 'true'])
def _Same(tool, name, setting_type):
"""Defines a setting that has the same name in MSVS and MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
_Renamed(tool, name, name, setting_type)
def _Renamed(tool, msvs_name, msbuild_name, setting_type):
"""Defines a setting for which the name has changed.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting.
msbuild_name: the name of the MSBuild setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
_msbuild_validators[tool.msbuild_name][msbuild_name] = (
setting_type.ValidateMSBuild)
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
_MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
setting_type)
def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
msbuild_settings_name, setting_type):
"""Defines a setting that may have moved to a new section.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_settings_name: the MSVS name of the setting.
msbuild_tool_name: the name of the MSBuild tool to place the setting under.
msbuild_settings_name: the MSBuild name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
_msvs_validators[tool.msvs_name][msvs_settings_name] = (
setting_type.ValidateMSVS)
validator = setting_type.ValidateMSBuild
_msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
def _MSVSOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSVS.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(unused_value, unused_msbuild_settings):
# Since this is for MSVS only settings, no translation will happen.
pass
_msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _MSBuildOnly(tool, name, setting_type):
"""Defines a setting that is only found in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
name: the name of the setting.
setting_type: the type of this setting.
"""
def _Translate(value, msbuild_settings):
# Let msbuild-only properties get translated as-is from msvs_settings.
tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
tool_settings[name] = value
_msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
_msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
def _ConvertedToAdditionalOption(tool, msvs_name, flag):
"""Defines a setting that's handled via a command line option in MSBuild.
Args:
tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
msvs_name: the name of the MSVS setting that if 'true' becomes a flag
flag: the flag to insert at the end of the AdditionalOptions
"""
def _Translate(value, msbuild_settings):
if value == 'true':
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if 'AdditionalOptions' in tool_settings:
new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
else:
new_flags = flag
tool_settings['AdditionalOptions'] = new_flags
_msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
def _CustomGeneratePreprocessedFile(tool, msvs_name):
def _Translate(value, msbuild_settings):
tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
if value == '0':
tool_settings['PreprocessToFile'] = 'false'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '1': # /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'false'
elif value == '2': # /EP /P
tool_settings['PreprocessToFile'] = 'true'
tool_settings['PreprocessSuppressLineNumbers'] = 'true'
else:
raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
# Create a bogus validator that looks for '0', '1', or '2'
msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
_msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
msbuild_validator = _boolean.ValidateMSBuild
msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
_msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
fix_vc_macro_slashes_regex = re.compile(
r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
)
# Regular expression to detect keys that were generated by exclusion lists
_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
"""Verify that 'setting' is valid if it is generated from an exclusion list.
If the setting appears to be generated from an exclusion list, the root name
is checked.
Args:
setting: A string that is the setting name to validate
settings: A dictionary where the keys are valid settings
error_msg: The message to emit in the event of error
stderr: The stream receiving the error messages.
"""
# This may be unrecognized because it's an exclusion list. If the
# setting name has the _excluded suffix, then check the root name.
unrecognized = True
m = re.match(_EXCLUDED_SUFFIX_RE, setting)
if m:
root_setting = m.group(1)
unrecognized = root_setting not in settings
if unrecognized:
# We don't know this setting. Give a warning.
print >> stderr, error_msg
def FixVCMacroSlashes(s):
"""Replace macros which have excessive following slashes.
These macros are known to have a built-in trailing slash. Furthermore, many
scripts hiccup on processing paths with extra slashes in the middle.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
s = fix_vc_macro_slashes_regex.sub(r'\1', s)
return s
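# Example (illustrative):
#   FixVCMacroSlashes(r'$(IntDir)\obj\foo.obj')  # -> r'$(IntDir)obj\foo.obj'
# Only the slashes immediately following the known macros are collapsed.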
def ConvertVCMacrosToMSBuild(s):
"""Convert the the MSVS macros found in the string to the MSBuild equivalent.
This list is probably not exhaustive. Add as needed.
"""
if '$' in s:
replace_map = {
'$(ConfigurationName)': '$(Configuration)',
'$(InputDir)': '%(RelativeDir)',
'$(InputExt)': '%(Extension)',
'$(InputFileName)': '%(Filename)%(Extension)',
'$(InputName)': '%(Filename)',
'$(InputPath)': '%(Identity)',
'$(ParentName)': '$(ProjectFileName)',
'$(PlatformName)': '$(Platform)',
'$(SafeInputName)': '%(Filename)',
}
for old, new in replace_map.iteritems():
s = s.replace(old, new)
s = FixVCMacroSlashes(s)
return s
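# Example (illustrative):
#   ConvertVCMacrosToMSBuild('$(InputName)_$(ConfigurationName)$(InputExt)')
#   # -> '%(Filename)_$(Configuration)%(Extension)'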
def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
"""Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
Args:
msvs_settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
Returns:
A dictionary of MSBuild settings. The key is either the MSBuild tool name
or the empty string (for the global settings). The values are themselves
dictionaries of settings and their values.
"""
msbuild_settings = {}
for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems():
if msvs_tool_name in _msvs_to_msbuild_converters:
msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
for msvs_setting, msvs_value in msvs_tool_settings.iteritems():
if msvs_setting in msvs_tool:
# Invoke the translation function.
try:
msvs_tool[msvs_setting](msvs_value, msbuild_settings)
except ValueError, e:
print >> stderr, ('Warning: while converting %s/%s to MSBuild, '
'%s' % (msvs_tool_name, msvs_setting, e))
else:
_ValidateExclusionSetting(msvs_setting,
msvs_tool,
('Warning: unrecognized setting %s/%s '
'while converting to MSBuild.' %
(msvs_tool_name, msvs_setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s while converting to '
'MSBuild.' % msvs_tool_name)
return msbuild_settings
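# End-to-end sketch (illustrative): converting one MSVS compiler tool's
# settings to their MSBuild equivalents.
#   msvs = {'VCCLCompilerTool': {'WarningLevel': '3',
#                                'BufferSecurityCheck': 'true'}}
#   ConvertToMSBuildSettings(msvs)
#   # -> {'ClCompile': {'WarningLevel': 'Level3',
#   #                   'BufferSecurityCheck': 'true'}}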
def ValidateMSVSSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSVS.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msvs_validators, settings, stderr)
def ValidateMSBuildSettings(settings, stderr=sys.stderr):
"""Validates that the names of the settings are valid for MSBuild.
Args:
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
_ValidateSettings(_msbuild_validators, settings, stderr)
def _ValidateSettings(validators, settings, stderr):
"""Validates that the settings are valid for MSBuild or MSVS.
We currently only validate the names of the settings, not their values.
Args:
validators: A dictionary of tools and their validators.
settings: A dictionary. The key is the tool name. The values are
themselves dictionaries of settings and their values.
stderr: The stream receiving the error messages.
"""
for tool_name in settings:
if tool_name in validators:
tool_validators = validators[tool_name]
for setting, value in settings[tool_name].iteritems():
if setting in tool_validators:
try:
tool_validators[setting](value)
except ValueError, e:
print >> stderr, ('Warning: for %s/%s, %s' %
(tool_name, setting, e))
else:
_ValidateExclusionSetting(setting,
tool_validators,
('Warning: unrecognized setting %s/%s' %
(tool_name, setting)),
stderr)
else:
print >> stderr, ('Warning: unrecognized tool %s' % tool_name)
# MSVS and MBuild names of the tools.
_compile = _Tool('VCCLCompilerTool', 'ClCompile')
_link = _Tool('VCLinkerTool', 'Link')
_midl = _Tool('VCMIDLTool', 'Midl')
_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
_lib = _Tool('VCLibrarianTool', 'Lib')
_manifest = _Tool('VCManifestTool', 'Manifest')
_masm = _Tool('MASM', 'MASM')
_AddTool(_compile)
_AddTool(_link)
_AddTool(_midl)
_AddTool(_rc)
_AddTool(_lib)
_AddTool(_manifest)
_AddTool(_masm)
# Add sections only found in the MSBuild settings.
_msbuild_validators[''] = {}
_msbuild_validators['ProjectReference'] = {}
_msbuild_validators['ManifestResourceCompile'] = {}
# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
# ClCompile in MSBuild.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
# the schema of the MSBuild ClCompile settings.
# Options that have the same name in MSVS and MSBuild
_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_compile, 'AdditionalOptions', _string_list)
_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
_Same(_compile, 'BrowseInformationFile', _file_name)
_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
_Same(_compile, 'StringPooling', _boolean) # /GF
_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_compile, 'UseFullPaths', _boolean) # /FC
_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
_Same(_compile, 'XMLDocumentationFileName', _file_name)
_Same(_compile, 'AssemblerOutput',
_Enumeration(['NoListing',
'AssemblyCode', # /FA
'All', # /FAcs
'AssemblyAndMachineCode', # /FAc
'AssemblyAndSourceCode'])) # /FAs
_Same(_compile, 'BasicRuntimeChecks',
_Enumeration(['Default',
'StackFrameRuntimeCheck', # /RTCs
'UninitializedLocalUsageCheck', # /RTCu
'EnableFastChecks'])) # /RTC1
_Same(_compile, 'BrowseInformation',
_Enumeration(['false',
'true', # /FR
'true'])) # /Fr
_Same(_compile, 'CallingConvention',
_Enumeration(['Cdecl', # /Gd
'FastCall', # /Gr
'StdCall', # /Gz
'VectorCall'])) # /Gv
_Same(_compile, 'CompileAs',
_Enumeration(['Default',
'CompileAsC', # /TC
'CompileAsCpp'])) # /TP
_Same(_compile, 'DebugInformationFormat',
_Enumeration(['', # Disabled
'OldStyle', # /Z7
None,
'ProgramDatabase', # /Zi
'EditAndContinue'])) # /ZI
_Same(_compile, 'EnableEnhancedInstructionSet',
_Enumeration(['NotSet',
'StreamingSIMDExtensions', # /arch:SSE
'StreamingSIMDExtensions2', # /arch:SSE2
'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
'NoExtensions', # /arch:IA32 (vs2012+)
# This one only exists in the new msbuild format.
'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
]))
_Same(_compile, 'ErrorReporting',
_Enumeration(['None', # /errorReport:none
'Prompt', # /errorReport:prompt
'Queue'], # /errorReport:queue
                   new=['Send']))  # /errorReport:send
_Same(_compile, 'ExceptionHandling',
_Enumeration(['false',
'Sync', # /EHsc
'Async'], # /EHa
new=['SyncCThrow'])) # /EHs
_Same(_compile, 'FavorSizeOrSpeed',
_Enumeration(['Neither',
'Speed', # /Ot
'Size'])) # /Os
_Same(_compile, 'FloatingPointModel',
_Enumeration(['Precise', # /fp:precise
'Strict', # /fp:strict
'Fast'])) # /fp:fast
_Same(_compile, 'InlineFunctionExpansion',
_Enumeration(['Default',
'OnlyExplicitInline', # /Ob1
'AnySuitable'], # /Ob2
new=['Disabled'])) # /Ob0
_Same(_compile, 'Optimization',
_Enumeration(['Disabled', # /Od
'MinSpace', # /O1
'MaxSpeed', # /O2
'Full'])) # /Ox
_Same(_compile, 'RuntimeLibrary',
_Enumeration(['MultiThreaded', # /MT
'MultiThreadedDebug', # /MTd
'MultiThreadedDLL', # /MD
'MultiThreadedDebugDLL'])) # /MDd
_Same(_compile, 'StructMemberAlignment',
_Enumeration(['Default',
'1Byte', # /Zp1
'2Bytes', # /Zp2
'4Bytes', # /Zp4
'8Bytes', # /Zp8
'16Bytes'])) # /Zp16
_Same(_compile, 'WarningLevel',
_Enumeration(['TurnOffAllWarnings', # /W0
'Level1', # /W1
'Level2', # /W2
'Level3', # /W3
'Level4'], # /W4
new=['EnableAllWarnings'])) # /Wall
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
_boolean) # /Gy
_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
_boolean) # /Oi
_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
_file_name) # Used with /Yc and /Yu
_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
_file_name) # /Fp
_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
_Enumeration(['NotUsing', # VS recognized '' for this value too.
'Create', # /Yc
'Use'])) # /Yu
_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
# MSVS options not found in MSBuild.
_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
_MSBuildOnly(_compile, 'CompileAsManaged',
_Enumeration([], new=['false',
'true'])) # /clr
_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
# Defines a setting that needs very customized processing
_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
# Directives for converting MSVS VCLinkerTool to MSBuild Link.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
# the schema of the MSBuild Link settings.
# Options that have the same name in MSVS and MSBuild
_Same(_link, 'AdditionalDependencies', _file_list)
_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
# /MANIFESTDEPENDENCY:
_Same(_link, 'AdditionalManifestDependencies', _file_list)
_Same(_link, 'AdditionalOptions', _string_list)
_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
_Same(_link, 'BaseAddress', _string) # /BASE
_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
_Same(_link, 'FunctionOrder', _file_name) # /ORDER
_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
_Same(_link, 'GenerateMapFile', _boolean) # /MAP
_Same(_link, 'HeapCommitSize', _string)
_Same(_link, 'HeapReserveSize', _string) # /HEAP
_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
_Same(_link, 'KeyFile', _file_name) # /KEYFILE
_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
_Same(_link, 'MapFileName', _file_name)
_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
_Same(_link, 'MergeSections', _string) # /MERGE
_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_link, 'OutputFile', _file_name) # /OUT
_Same(_link, 'PerUserRedirection', _boolean)
_Same(_link, 'Profile', _boolean) # /PROFILE
_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
_Same(_link, 'RegisterOutput', _boolean)
_Same(_link, 'SetChecksum', _boolean) # /RELEASE
_Same(_link, 'StackCommitSize', _string)
_Same(_link, 'StackReserveSize', _string) # /STACK
_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
_Same(_link, 'Version', _string) # /VERSION
_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
_subsystem_enumeration = _Enumeration(
['NotSet',
'Console', # /SUBSYSTEM:CONSOLE
'Windows', # /SUBSYSTEM:WINDOWS
'Native', # /SUBSYSTEM:NATIVE
'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
'EFI ROM', # /SUBSYSTEM:EFI_ROM
'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
new=['POSIX']) # /SUBSYSTEM:POSIX
_target_machine_enumeration = _Enumeration(
['NotSet',
'MachineX86', # /MACHINE:X86
None,
'MachineARM', # /MACHINE:ARM
'MachineEBC', # /MACHINE:EBC
'MachineIA64', # /MACHINE:IA64
None,
'MachineMIPS', # /MACHINE:MIPS
'MachineMIPS16', # /MACHINE:MIPS16
'MachineMIPSFPU', # /MACHINE:MIPSFPU
'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
None,
None,
None,
'MachineSH4', # /MACHINE:SH4
None,
'MachineTHUMB', # /MACHINE:THUMB
'MachineX64']) # /MACHINE:X64
_Same(_link, 'AssemblyDebug',
_Enumeration(['',
'true', # /ASSEMBLYDEBUG
'false'])) # /ASSEMBLYDEBUG:DISABLE
_Same(_link, 'CLRImageType',
_Enumeration(['Default',
'ForceIJWImage', # /CLRIMAGETYPE:IJW
                    'ForcePureILImage',  # /CLRIMAGETYPE:PURE
                    'ForceSafeILImage']))  # /CLRIMAGETYPE:SAFE
_Same(_link, 'CLRThreadAttribute',
_Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
_Same(_link, 'DataExecutionPrevention',
_Enumeration(['',
'false', # /NXCOMPAT:NO
'true'])) # /NXCOMPAT
_Same(_link, 'Driver',
_Enumeration(['NotSet',
'Driver', # /Driver
'UpOnly', # /DRIVER:UPONLY
'WDM'])) # /DRIVER:WDM
_Same(_link, 'LinkTimeCodeGeneration',
_Enumeration(['Default',
'UseLinkTimeCodeGeneration', # /LTCG
'PGInstrument', # /LTCG:PGInstrument
'PGOptimization', # /LTCG:PGOptimize
'PGUpdate'])) # /LTCG:PGUpdate
_Same(_link, 'ShowProgress',
_Enumeration(['NotSet',
'LinkVerbose', # /VERBOSE
'LinkVerboseLib'], # /VERBOSE:Lib
new=['LinkVerboseICF', # /VERBOSE:ICF
'LinkVerboseREF', # /VERBOSE:REF
'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
'LinkVerboseCLR'])) # /VERBOSE:CLR
_Same(_link, 'SubSystem', _subsystem_enumeration)
_Same(_link, 'TargetMachine', _target_machine_enumeration)
_Same(_link, 'UACExecutionLevel',
_Enumeration(['AsInvoker', # /level='asInvoker'
'HighestAvailable', # /level='highestAvailable'
'RequireAdministrator'])) # /level='requireAdministrator'
_Same(_link, 'MinimumRequiredVersion', _string)
_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
# Options found in MSVS that have been renamed in MSBuild.
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
_Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
'PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin'], # /ERRORREPORT:QUEUE
new=['SendErrorReport'])) # /ERRORREPORT:SEND
_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
_file_list) # /NODEFAULTLIB
_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
_Moved(_link, 'GenerateManifest', '', _boolean)
_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
_Moved(_link, 'LinkIncremental', '', _newly_boolean)
_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
_MSBuildOnly(_link, 'ForceFileOutput',
_Enumeration([], new=['Enabled', # /FORCE
# /FORCE:MULTIPLE
'MultiplyDefinedSymbolOnly',
'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
_MSBuildOnly(_link, 'CreateHotPatchableImage',
_Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
'X86Image', # /FUNCTIONPADMIN:5
'X64Image', # /FUNCTIONPADMIN:6
'ItaniumImage'])) # /FUNCTIONPADMIN:16
_MSBuildOnly(_link, 'CLRSupportLastError',
_Enumeration([], new=['Enabled', # /CLRSupportLastError
'Disabled', # /CLRSupportLastError:NO
# /CLRSupportLastError:SYSTEMDLL
'SystemDlls']))
# Directives for converting VCResourceCompilerTool to ResourceCompile.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
# the schema of the MSBuild ResourceCompile settings.
_Same(_rc, 'AdditionalOptions', _string_list)
_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_rc, 'Culture', _Integer(msbuild_base=16))
_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
_Same(_rc, 'ResourceOutputFileName', _string) # /fo
_Same(_rc, 'ShowProgress', _boolean) # /v
# There is no UI in VisualStudio 2008 to set the following properties.
# However they are found in CL and other tools. Include them here for
# completeness, as they are very likely to have the same usage pattern.
_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
# MSBuild options not found in MSVS.
_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
# Directives for converting VCMIDLTool to Midl.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
# the schema of the MSBuild Midl settings.
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name) # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
_Same(_midl, 'OutputDirectory', _string) # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
_Same(_midl, 'ProxyFileName', _file_name) # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
_Same(_midl, 'WarnAsError', _boolean) # /WX
_Same(_midl, 'DefaultCharType',
_Enumeration(['Unsigned', # /char unsigned
'Signed', # /char signed
'Ascii'])) # /char ascii7
_Same(_midl, 'TargetEnvironment',
_Enumeration(['NotSet',
'Win32', # /env win32
'Itanium', # /env ia64
'X64'])) # /env x64
_Same(_midl, 'EnableErrorChecks',
_Enumeration(['EnableCustom',
'None', # /error none
'All'])) # /error all
_Same(_midl, 'StructMemberAlignment',
_Enumeration(['NotSet',
'1', # Zp1
'2', # Zp2
'4', # Zp4
'8'])) # Zp8
_Same(_midl, 'WarningLevel',
_Enumeration(['0', # /W0
'1', # /W1
'2', # /W2
'3', # /W3
'4'])) # /W4
_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
_boolean) # /robust
# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
_Enumeration([], new=['Stub', # /client stub
'None'])) # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
             _Enumeration([], new=['Stub',  # /server stub
                                   'None']))  # /server none
_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
_Enumeration([], new=['NewFormat', # /newtlb
'OldFormat'])) # /oldtlb
# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
_Same(_lib, 'OutputFile', _file_name) # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)
# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference. We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
_Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
'QueueForNextLogin', # /ERRORREPORT:QUEUE
'SendErrorReport', # /ERRORREPORT:SEND
'NoErrorReport'])) # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)
# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Mt settings.
# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
_Same(_manifest, 'ComponentFileName', _file_name) # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name) # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
'ManifestResourceCompile',
'ResourceOutputFileName',
_file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)
# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
_file_name) # /managedassemblyname
_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
# Directives for MASM.
# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
# MSBuild MASM settings.
# Options that have the same name in MSVS and MSBuild.
_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
| mit |
rruebner/odoo | openerp/osv/orm.py | 126 | 6167 | import simplejson
from lxml import etree
from ..exceptions import except_orm
from ..models import (
MetaModel,
BaseModel,
Model, TransientModel, AbstractModel,
MAGIC_COLUMNS,
LOG_ACCESS_COLUMNS,
)
# extra definitions for backward compatibility
browse_record_list = BaseModel
class browse_record(object):
""" Pseudo-class for testing record instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and len(inst) <= 1
class browse_null(object):
""" Pseudo-class for testing null instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and not inst
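# Example (illustrative): the pseudo-classes above keep old-API isinstance()
# checks working against new-API recordsets; ``records`` is a hypothetical
# recordset:
#
#   isinstance(records[:1], browse_record)   # True for a 0- or 1-record set
#   isinstance(records[:0], browse_null)     # True only for an empty set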
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
    for state, modifs in field.get("states", {}).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
# Don't deal with groups, it is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
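# Example (illustrative; the field descriptor and XML below are made up):
#
#   field_desc = {'readonly': True,
#                 'states': {'draft': [('readonly', False)]}}
#   node = etree.fromstring('<field name="name"/>')
#   setup_modifiers(node, field=field_desc)
#   node.get('modifiers')  # '{"readonly": [["state", "not in", ["draft"]]]}'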
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
| agpl-3.0 |
kvar/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_settings.py | 38 | 2607 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2018, Nikhil Jain <nikjain@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_settings
author: "Nikhil Jain (@jainnikhil30)"
version_added: "2.7"
short_description: Modify Ansible Tower settings.
description:
- Modify Ansible Tower settings. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name of setting to modify
required: True
value:
description:
      - Value to set for the given setting.
required: True
extends_documentation_fragment: tower
'''
RETURN = ''' # '''
EXAMPLES = '''
- name: Set the value of AWX_PROOT_BASE_PATH
tower_settings:
name: AWX_PROOT_BASE_PATH
value: "/tmp"
register: testing_settings
- name: Set the value of AWX_PROOT_SHOW_PATHS
tower_settings:
name: "AWX_PROOT_SHOW_PATHS"
value: "'/var/lib/awx/projects/', '/tmp'"
register: testing_settings
- name: Set the LDAP Auth Bind Password
tower_settings:
name: "AUTH_LDAP_BIND_PASSWORD"
value: "Password"
no_log: true
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def main():
argument_spec = dict(
name=dict(required=True),
value=dict(required=True),
)
module = TowerModule(
argument_spec=argument_spec,
supports_check_mode=False
)
json_output = {}
name = module.params.get('name')
value = module.params.get('value')
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
try:
setting = tower_cli.get_resource('setting')
result = setting.modify(setting=name, value=value)
json_output['id'] = result['id']
json_output['value'] = result['value']
except (exc.ConnectionError, exc.BadRequest, exc.AuthError) as excinfo:
module.fail_json(msg='Failed to modify the setting: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 |
guorendong/iridium-browser-ubuntu | third_party/pywebsocket/src/test/test_handshake.py | 14 | 8420 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for handshake._base module."""
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket.common import ExtensionParameter
from mod_pywebsocket.common import ExtensionParsingException
from mod_pywebsocket.common import format_extensions
from mod_pywebsocket.common import parse_extensions
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import validate_subprotocol
class ValidateSubprotocolTest(unittest.TestCase):
"""A unittest for validate_subprotocol method."""
def test_validate_subprotocol(self):
# Should succeed.
validate_subprotocol('sample')
validate_subprotocol('Sample')
validate_subprotocol('sample\x7eprotocol')
# Should fail.
self.assertRaises(HandshakeException,
validate_subprotocol,
'')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x09protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x19protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x20protocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
'sample\x7fprotocol')
self.assertRaises(HandshakeException,
validate_subprotocol,
# "Japan" in Japanese
u'\u65e5\u672c')
_TEST_TOKEN_EXTENSION_DATA = [
('foo', [('foo', [])]),
('foo; bar', [('foo', [('bar', None)])]),
('foo; bar=baz', [('foo', [('bar', 'baz')])]),
('foo; bar=baz; car=cdr', [('foo', [('bar', 'baz'), ('car', 'cdr')])]),
('foo; bar=baz, car; cdr',
[('foo', [('bar', 'baz')]), ('car', [('cdr', None)])]),
('a, b, c, d',
[('a', []), ('b', []), ('c', []), ('d', [])]),
]
_TEST_QUOTED_EXTENSION_DATA = [
('foo; bar=""', [('foo', [('bar', '')])]),
('foo; bar=" baz "', [('foo', [('bar', ' baz ')])]),
('foo; bar=",baz;"', [('foo', [('bar', ',baz;')])]),
('foo; bar="\\\r\\\nbaz"', [('foo', [('bar', '\r\nbaz')])]),
('foo; bar="\\"baz"', [('foo', [('bar', '"baz')])]),
('foo; bar="\xbbbaz"', [('foo', [('bar', '\xbbbaz')])]),
]
_TEST_REDUNDANT_TOKEN_EXTENSION_DATA = [
('foo \t ', [('foo', [])]),
('foo; \r\n bar', [('foo', [('bar', None)])]),
('foo; bar=\r\n \r\n baz', [('foo', [('bar', 'baz')])]),
('foo ;bar = baz ', [('foo', [('bar', 'baz')])]),
('foo,bar,,baz', [('foo', []), ('bar', []), ('baz', [])]),
]
_TEST_REDUNDANT_QUOTED_EXTENSION_DATA = [
('foo; bar="\r\n \r\n baz"', [('foo', [('bar', ' baz')])]),
]
class ExtensionsParserTest(unittest.TestCase):
def _verify_extension_list(self, expected_list, actual_list):
"""Verifies that ExtensionParameter objects in actual_list have the
        same members as extension definitions in expected_list. An extension
        definition used in this test is a pair of an extension name and a
parameter dictionary.
"""
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
(name, parameters) = expected
self.assertEqual(name, actual._name)
self.assertEqual(parameters, actual._parameters)
def test_parse(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string,
allow_quoted_string=False))
for formatted_string, unused_definition in _TEST_QUOTED_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions,
formatted_string, False)
def test_parse_with_allow_quoted_string(self):
for formatted_string, definition in _TEST_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string,
allow_quoted_string=True))
for formatted_string, definition in _TEST_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string,
allow_quoted_string=True))
def test_parse_redundant_data(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string,
allow_quoted_string=False))
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions,
formatted_string, False)
def test_parse_redundant_data_with_allow_quoted_string(self):
for (formatted_string,
definition) in _TEST_REDUNDANT_TOKEN_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string,
allow_quoted_string=True))
for (formatted_string,
definition) in _TEST_REDUNDANT_QUOTED_EXTENSION_DATA:
self._verify_extension_list(
definition, parse_extensions(formatted_string,
allow_quoted_string=True))
def test_parse_bad_data(self):
_TEST_BAD_EXTENSION_DATA = [
('foo; ; '),
('foo; a a'),
('foo foo'),
(',,,'),
('foo; bar='),
('foo; bar="hoge'),
('foo; bar="a\r"'),
('foo; bar="\\\xff"'),
('foo; bar=\ra'),
]
for formatted_string in _TEST_BAD_EXTENSION_DATA:
self.assertRaises(
ExtensionParsingException, parse_extensions, formatted_string)
class FormatExtensionsTest(unittest.TestCase):
def test_format_extensions(self):
for formatted_string, definitions in _TEST_TOKEN_EXTENSION_DATA:
extensions = []
for definition in definitions:
(name, parameters) = definition
extension = ExtensionParameter(name)
extension._parameters = parameters
extensions.append(extension)
self.assertEqual(
formatted_string, format_extensions(extensions))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| bsd-3-clause |
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/test/test_sysconfig.py | 80 | 16880 | import unittest
import sys
import os
import subprocess
import shutil
from copy import copy
from test.support import (run_unittest, TESTFN, unlink, check_warnings,
captured_stdout, skip_unless_symlink)
import sysconfig
from sysconfig import (get_paths, get_platform, get_config_vars,
get_path, get_path_names, _INSTALL_SCHEMES,
_get_default_scheme, _expand_vars,
get_scheme_names, get_config_var, _main)
import _osx_support
class TestSysConfig(unittest.TestCase):
def setUp(self):
super(TestSysConfig, self).setUp()
self.sys_path = sys.path[:]
# patching os.uname
if hasattr(os, 'uname'):
self.uname = os.uname
self._uname = os.uname()
else:
self.uname = None
self._set_uname(('',)*5)
os.uname = self._get_uname
# saving the environment
self.name = os.name
self.platform = sys.platform
self.version = sys.version
self.sep = os.sep
self.join = os.path.join
self.isabs = os.path.isabs
self.splitdrive = os.path.splitdrive
self._config_vars = sysconfig._CONFIG_VARS, copy(sysconfig._CONFIG_VARS)
self._added_envvars = []
self._changed_envvars = []
for var in ('MACOSX_DEPLOYMENT_TARGET', 'PATH'):
if var in os.environ:
self._changed_envvars.append((var, os.environ[var]))
else:
self._added_envvars.append(var)
def tearDown(self):
sys.path[:] = self.sys_path
self._cleanup_testfn()
if self.uname is not None:
os.uname = self.uname
else:
del os.uname
os.name = self.name
sys.platform = self.platform
sys.version = self.version
os.sep = self.sep
os.path.join = self.join
os.path.isabs = self.isabs
os.path.splitdrive = self.splitdrive
sysconfig._CONFIG_VARS = self._config_vars[0]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(self._config_vars[1])
for var, value in self._changed_envvars:
os.environ[var] = value
for var in self._added_envvars:
os.environ.pop(var, None)
super(TestSysConfig, self).tearDown()
def _set_uname(self, uname):
self._uname = os.uname_result(uname)
def _get_uname(self):
return self._uname
def _cleanup_testfn(self):
path = TESTFN
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def test_get_path_names(self):
self.assertEqual(get_path_names(), sysconfig._SCHEME_KEYS)
def test_get_paths(self):
scheme = get_paths()
default_scheme = _get_default_scheme()
wanted = _expand_vars(default_scheme, None)
wanted = sorted(wanted.items())
scheme = sorted(scheme.items())
self.assertEqual(scheme, wanted)
def test_get_path(self):
# XXX make real tests here
for scheme in _INSTALL_SCHEMES:
for name in _INSTALL_SCHEMES[scheme]:
res = get_path(name, scheme)
def test_get_config_vars(self):
cvars = get_config_vars()
self.assertIsInstance(cvars, dict)
self.assertTrue(cvars)
def test_get_platform(self):
# windows XP, 32bits
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Intel)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win32')
# windows XP, amd64
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Amd64)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-amd64')
# windows XP, itanium
os.name = 'nt'
sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
'[MSC v.1310 32 bit (Itanium)]')
sys.platform = 'win32'
self.assertEqual(get_platform(), 'win-ia64')
# macbook
os.name = 'posix'
sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
'\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
sys.platform = 'darwin'
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'PowerPC'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-ppc')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-ppc64')
finally:
sys.maxsize = maxint
self._set_uname(('Darwin', 'macziade', '8.11.1',
('Darwin Kernel Version 8.11.1: '
'Wed Oct 10 18:23:28 PDT 2007; '
'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
'-fwrapv -O3 -Wall -Wstrict-prototypes')
maxint = sys.maxsize
try:
sys.maxsize = 2147483647
self.assertEqual(get_platform(), 'macosx-10.3-i386')
sys.maxsize = 9223372036854775807
self.assertEqual(get_platform(), 'macosx-10.3-x86_64')
finally:
sys.maxsize = maxint
# macbook with fat binaries (fat, universal or fat64)
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-intel')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat3')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-universal')
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3')
self.assertEqual(get_platform(), 'macosx-10.4-fat64')
for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
_osx_support._remove_original_values(get_config_vars())
get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
'/Developer/SDKs/MacOSX10.4u.sdk '
'-fno-strict-aliasing -fno-common '
'-dynamic -DNDEBUG -g -O3' % arch)
self.assertEqual(get_platform(), 'macosx-10.4-%s' % arch)
# linux debian sarge
os.name = 'posix'
sys.version = ('2.3.5 (#1, Jul 4 2007, 17:28:59) '
'\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
sys.platform = 'linux2'
self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
'#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
self.assertEqual(get_platform(), 'linux-i686')
        # XXX more platforms to test here
def test_get_config_h_filename(self):
config_h = sysconfig.get_config_h_filename()
self.assertTrue(os.path.isfile(config_h), config_h)
def test_get_scheme_names(self):
wanted = ('nt', 'nt_user', 'osx_framework_user',
'posix_home', 'posix_prefix', 'posix_user')
self.assertEqual(get_scheme_names(), wanted)
@skip_unless_symlink
def test_symlink(self):
# On Windows, the EXE needs to know where pythonXY.dll is at so we have
# to add the directory to the path.
if sys.platform == "win32":
os.environ["PATH"] = "{};{}".format(
os.path.dirname(sys.executable), os.environ["PATH"])
# Issue 7880
def get(python):
cmd = [python, '-c',
'import sysconfig; print(sysconfig.get_platform())']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=os.environ)
return p.communicate()
real = os.path.realpath(sys.executable)
link = os.path.abspath(TESTFN)
os.symlink(real, link)
try:
self.assertEqual(get(real), get(link))
finally:
unlink(link)
def test_user_similar(self):
# Issue #8759: make sure the posix scheme for the users
# is similar to the global posix_prefix one
base = get_config_var('base')
user = get_config_var('userbase')
# the global scheme mirrors the distinction between prefix and
# exec-prefix but not the user scheme, so we have to adapt the paths
# before comparing (issue #9100)
adapt = sys.base_prefix != sys.base_exec_prefix
for name in ('stdlib', 'platstdlib', 'purelib', 'platlib'):
global_path = get_path(name, 'posix_prefix')
if adapt:
global_path = global_path.replace(sys.exec_prefix, sys.base_prefix)
base = base.replace(sys.exec_prefix, sys.base_prefix)
elif sys.base_prefix != sys.prefix:
# virtual environment? Likewise, we have to adapt the paths
# before comparing
global_path = global_path.replace(sys.base_prefix, sys.prefix)
base = base.replace(sys.base_prefix, sys.prefix)
user_path = get_path(name, 'posix_user')
self.assertEqual(user_path, global_path.replace(base, user, 1))
def test_main(self):
# just making sure _main() runs and returns things in the stdout
with captured_stdout() as output:
_main()
self.assertTrue(len(output.getvalue().split('\n')) > 0)
@unittest.skipIf(sys.platform == "win32", "Does not apply to Windows")
def test_ldshared_value(self):
ldflags = sysconfig.get_config_var('LDFLAGS')
ldshared = sysconfig.get_config_var('LDSHARED')
self.assertIn(ldflags, ldshared)
@unittest.skipUnless(sys.platform == "darwin", "test only relevant on MacOSX")
def test_platform_in_subprocess(self):
my_platform = sysconfig.get_platform()
# Test without MACOSX_DEPLOYMENT_TARGET in the environment
env = os.environ.copy()
if 'MACOSX_DEPLOYMENT_TARGET' in env:
del env['MACOSX_DEPLOYMENT_TARGET']
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
# Test with MACOSX_DEPLOYMENT_TARGET in the environment, and
# using a value that is unlikely to be the default one.
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
p = subprocess.Popen([
sys.executable, '-c',
'import sysconfig; print(sysconfig.get_platform())',
],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
env=env)
test_platform = p.communicate()[0].strip()
test_platform = test_platform.decode('utf-8')
status = p.wait()
self.assertEqual(status, 0)
self.assertEqual(my_platform, test_platform)
def test_srcdir(self):
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
self.assertTrue(os.path.isabs(srcdir), srcdir)
self.assertTrue(os.path.isdir(srcdir), srcdir)
if sysconfig._PYTHON_BUILD:
# The python executable has not been installed so srcdir
# should be a full source checkout.
Python_h = os.path.join(srcdir, 'Include', 'Python.h')
self.assertTrue(os.path.exists(Python_h), Python_h)
self.assertTrue(sysconfig._is_python_source_dir(srcdir))
elif os.name == 'posix':
makefile_dir = os.path.dirname(sysconfig.get_makefile_filename())
# Issue #19340: srcdir has been realpath'ed already
makefile_dir = os.path.realpath(makefile_dir)
self.assertEqual(makefile_dir, srcdir)
def test_srcdir_independent_of_cwd(self):
# srcdir should be independent of the current working directory
# See Issues #15322, #15364.
srcdir = sysconfig.get_config_var('srcdir')
cwd = os.getcwd()
try:
os.chdir('..')
srcdir2 = sysconfig.get_config_var('srcdir')
finally:
os.chdir(cwd)
self.assertEqual(srcdir, srcdir2)
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_deprecation(self):
self.assertWarns(DeprecationWarning,
sysconfig.get_config_var, 'SO')
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_value(self):
with check_warnings(('', DeprecationWarning)):
self.assertEqual(sysconfig.get_config_var('SO'),
sysconfig.get_config_var('EXT_SUFFIX'))
@unittest.skipIf(sysconfig.get_config_var('EXT_SUFFIX') is None,
'EXT_SUFFIX required for this test')
def test_SO_in_vars(self):
vars = sysconfig.get_config_vars()
self.assertIsNotNone(vars['SO'])
self.assertEqual(vars['SO'], vars['EXT_SUFFIX'])
class MakefileTests(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith('win'),
'Test is not Windows compatible')
def test_get_makefile_filename(self):
makefile = sysconfig.get_makefile_filename()
self.assertTrue(os.path.isfile(makefile), makefile)
def test_parse_makefile(self):
self.addCleanup(unlink, TESTFN)
with open(TESTFN, "w") as makefile:
print("var1=a$(VAR2)", file=makefile)
print("VAR2=b$(var3)", file=makefile)
print("var3=42", file=makefile)
print("var4=$/invalid", file=makefile)
print("var5=dollar$$5", file=makefile)
vars = sysconfig._parse_makefile(TESTFN)
self.assertEqual(vars, {
'var1': 'ab42',
'VAR2': 'b42',
'var3': 42,
'var4': '$/invalid',
'var5': 'dollar$5',
})
def test_main():
run_unittest(TestSysConfig, MakefileTests)
if __name__ == "__main__":
test_main()
| gpl-2.0 |
alexandrucoman/vbox-nova-driver | nova/virt/xenapi/fake.py | 6 | 38578 | # Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import random
import uuid
from xml.sax import saxutils
import zlib
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
from nova import exception
from nova.i18n import _
from nova.virt.xenapi.client import session as xenapi_session
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
LOG = logging.getLogger(__name__)
def reset():
for c in _CLASSES:
_db_content[c] = {}
host = create_host('fake')
create_vm('fake dom 0',
'Running',
is_a_template=False,
is_control_domain=True,
resident_on=host)
def reset_table(table):
if table not in _CLASSES:
return
_db_content[table] = {}
def _create_pool(name_label):
return _create_object('pool',
{'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
host_ref = _create_object('host',
{'name_label': name_label,
'hostname': hostname,
'address': address})
host_default_sr_ref = _create_local_srs(host_ref)
_create_local_pif(host_ref)
# Create a pool if we don't have one already
if len(_db_content['pool']) == 0:
pool_ref = _create_pool('')
_db_content['pool'][pool_ref]['master'] = host_ref
_db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
        _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
    return host_ref
def create_network(name_label, bridge):
return _create_object('network',
{'name_label': name_label,
'bridge': bridge})
def create_vm(name_label, status, **kwargs):
if status == 'Running':
domid = random.randrange(1, 1 << 16)
resident_on = _db_content['host'].keys()[0]
else:
domid = -1
resident_on = ''
vm_rec = kwargs.copy()
vm_rec.update({'name_label': name_label,
'domid': domid,
'power_state': status,
'blocked_operations': {},
'resident_on': resident_on})
vm_ref = _create_object('VM', vm_rec)
after_VM_create(vm_ref, vm_rec)
return vm_ref
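# Example (illustrative, mirroring how the unit tests drive this module; the
# instance name is made up):
#
#   reset()
#   vm_ref = create_vm('instance-00000001', 'Running')
#   get_record('VM', vm_ref)['power_state']   # -> 'Running'
#   destroy_vm(vm_ref)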
def destroy_vm(vm_ref):
vm_rec = _db_content['VM'][vm_ref]
vbd_refs = vm_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VM'][vm_ref]
def destroy_vbd(vbd_ref):
vbd_rec = _db_content['VBD'][vbd_ref]
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].remove(vbd_ref)
vdi_ref = vbd_rec['VDI']
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].remove(vbd_ref)
del _db_content['VBD'][vbd_ref]
def destroy_vdi(vdi_ref):
vdi_rec = _db_content['VDI'][vdi_ref]
vbd_refs = vdi_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
vdi_rec = {
'SR': sr_ref,
'read_only': False,
'type': '',
'name_label': name_label,
'name_description': '',
'sharable': False,
'other_config': {},
'location': '',
'xenstore_data': {},
'sm_config': {'vhd-parent': None},
'physical_utilisation': '123',
'managed': True,
}
vdi_rec.update(kwargs)
vdi_ref = _create_object('VDI', vdi_rec)
after_VDI_create(vdi_ref, vdi_rec)
return vdi_ref
def after_VDI_create(vdi_ref, vdi_rec):
vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None):
if other_config is None:
other_config = {}
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': str(userdevice),
'currently_attached': False,
'other_config': other_config}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
def after_VBD_create(vbd_ref, vbd_rec):
"""Create read-only fields and backref from VM and VDI to VBD when VBD
is created.
"""
vbd_rec['currently_attached'] = False
vbd_rec['device'] = ''
vbd_rec.setdefault('other_config', {})
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].append(vbd_ref)
vm_name_label = _db_content['VM'][vm_ref]['name_label']
vbd_rec['vm_name_label'] = vm_name_label
vdi_ref = vbd_rec['VDI']
if vdi_ref and vdi_ref != "OpaqueRef:NULL":
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].append(vbd_ref)
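# Example (illustrative): creating a VBD wires up both backrefs; vm_ref and
# vdi_ref are assumed to come from create_vm()/create_vdi():
#
#   vbd_ref = create_vbd(vm_ref, vdi_ref)
#   assert vbd_ref in get_record('VM', vm_ref)['VBDs']
#   assert vbd_ref in get_record('VDI', vdi_ref)['VBDs']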
def after_VIF_create(vif_ref, vif_rec):
"""Create backref from VM to VIF when VIF is created.
"""
vm_ref = vif_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VIFs'].append(vif_ref)
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('domid', -1)
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('is_a_template', False)
vm_rec.setdefault('memory_static_max', str(8 * units.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('VIFs', [])
vm_rec.setdefault('resident_on', '')
def create_pbd(host_ref, sr_ref, attached):
config = {'path': '/var/run/sr-mount/%s' % sr_ref}
return _create_object('PBD',
{'device_config': config,
'host': host_ref,
'SR': sr_ref,
'currently_attached': attached})
def create_task(name_label):
return _create_object('task',
{'name_label': name_label,
'status': 'pending'})
def _create_local_srs(host_ref):
"""Create an SR that looks like the one created on the local disk by
default by the XenServer installer. Also, fake the installation of
an ISO SR.
"""
create_sr(name_label='Local storage ISO',
type='iso',
other_config={'i18n-original-value-name_label':
'Local storage ISO',
'i18n-key': 'local-storage-iso'},
physical_size=80000,
physical_utilisation=40000,
virtual_allocation=80000,
host_ref=host_ref)
return create_sr(name_label='Local storage',
type='ext',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
physical_size=40000,
physical_utilisation=20000,
virtual_allocation=10000,
host_ref=host_ref)
def create_sr(**kwargs):
sr_ref = _create_object(
'SR',
{'name_label': kwargs.get('name_label'),
'type': kwargs.get('type'),
'content_type': kwargs.get('type', 'user'),
'shared': kwargs.get('shared', False),
'physical_size': kwargs.get('physical_size', str(1 << 30)),
'physical_utilisation': str(
kwargs.get('physical_utilisation', 0)),
'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
'other_config': kwargs.get('other_config', {}),
'VDIs': kwargs.get('VDIs', [])})
pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': -1,
'device': 'fake0',
'host_uuid': host_ref,
'network': '',
'IP': '10.1.1.1',
'IPv6': '',
'uuid': '',
'management': 'true'})
_db_content['PIF'][pif_ref]['uuid'] = pif_ref
return pif_ref
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
_db_content[table][ref] = obj
return ref
def _create_sr(table, obj):
sr_type = obj[6]
    # The fake only supports the iscsi and nfs SR drivers
if sr_type != 'iscsi' and sr_type != 'nfs':
raise Failure(['SR_UNKNOWN_DRIVER', sr_type])
host_ref = _db_content['host'].keys()[0]
sr_ref = _create_object(table, obj[2])
if sr_type == 'iscsi':
vdi_ref = create_vdi('', sr_ref)
pbd_ref = create_pbd(host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
_db_content['PBD'][pbd_ref]['SR'] = sr_ref
return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
pif_rec = get_record('PIF', pif_ref)
vlan_pif_ref = _create_object('PIF',
{'name-label': 'Fake VLAN PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': vlan_num,
'device': pif_rec['device'],
'host_uuid': pif_rec['host_uuid']})
return _create_object('VLAN',
{'tagged-pif': pif_ref,
'untagged-pif': vlan_pif_ref,
'tag': vlan_num})
def get_all(table):
return _db_content[table].keys()
def get_all_records(table):
return _db_content[table]
def _query_matches(record, query):
# Simple support for the XenServer query language:
# 'field "host"="<uuid>" and field "SR"="<sr uuid>"'
# Tested through existing tests (e.g. calls to find_network_with_bridge)
and_clauses = query.split(" and ")
if len(and_clauses) > 1:
matches = True
for clause in and_clauses:
matches = matches and _query_matches(record, clause)
return matches
or_clauses = query.split(" or ")
if len(or_clauses) > 1:
matches = False
for clause in or_clauses:
matches = matches or _query_matches(record, clause)
return matches
if query[:4] == 'not ':
return not _query_matches(record, query[4:])
# Now it must be a single field - bad queries never match
if query[:5] != 'field':
return False
(field, value) = query[6:].split('=', 1)
# Some fields (e.g. name_label, memory_overhead) have double
# underscores in the DB, but only single underscores when querying
field = field.replace("__", "_").strip(" \"'")
value = value.strip(" \"'")
# Strings should be directly compared
if isinstance(record[field], str):
return record[field] == value
# But for all other value-checks, convert to a string first
# (Notably used for booleans - which can be lower or camel
# case and are interpreted/sanitised by XAPI)
return str(record[field]).lower() == value.lower()
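# Illustrative sketch (not part of the original module): how the mini query
# language handled by _query_matches above behaves. The record below is
# hypothetical.
#
#   rec = {'host': 'host-uuid', 'SR': 'sr-uuid', 'currently_attached': True}
#   _query_matches(rec, 'field "host"="host-uuid" and field "SR"="sr-uuid"')  # True
#   _query_matches(rec, 'not field "host"="other-uuid"')                      # True
#   _query_matches(rec, 'field "currently_attached"="true"')                  # True; non-strings compare as lowercased strings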
def get_all_records_where(table_name, query):
matching_records = {}
table = _db_content[table_name]
for record in table:
if _query_matches(table[record], query):
matching_records[record] = table[record]
return matching_records
def get_record(table, ref):
if ref in _db_content[table]:
return _db_content[table].get(ref)
else:
raise Failure(['HANDLE_INVALID', table, ref])
def check_for_session_leaks():
if len(_db_content['session']) > 0:
raise exception.NovaException('Sessions have leaked: %s' %
_db_content['session'])
def as_value(s):
"""Helper function for simulating XenAPI plugin responses. It
escapes and wraps the given argument.
"""
return '<value>%s</value>' % saxutils.escape(s)
def as_json(*args, **kwargs):
"""Helper function for simulating XenAPI plugin responses for those
that are returning JSON. If this function is given plain arguments,
then these are rendered as a JSON list. If it's given keyword
arguments then these are rendered as a JSON dict.
"""
arg = args or kwargs
return jsonutils.dumps(arg)
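# Illustrative sketch (not part of the original module): what the two helpers
# above return for hypothetical inputs.
#
#   as_value('a < b')    -> '<value>a &lt; b</value>'
#   as_json('ok', '1.0') -> '["ok", "1.0"]'   (positional args render a JSON list)
#   as_json(returncode='0', message='success')
#                        -> '{"returncode": "0", "message": "success"}'
#                           (kwargs render a JSON dict; key order may vary)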
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
return {str(i): self.details[i] for i in range(len(self.details))}
class SessionBase(object):
"""Base class for Fake Sessions."""
def __init__(self, uri):
self._session = None
xenapi_session.apply_session_helpers(self)
def pool_get_default_SR(self, _1, pool_ref):
return _db_content['pool'].values()[0]['default-SR']
def VBD_insert(self, _1, vbd_ref, vdi_ref):
vbd_rec = get_record('VBD', vbd_ref)
get_record('VDI', vdi_ref)
vbd_rec['empty'] = False
vbd_rec['VDI'] = vdi_ref
def VBD_plug(self, _1, ref):
rec = get_record('VBD', ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
rec['device'] = rec['userdevice']
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', ref])
rec['currently_attached'] = False
rec['device'] = ''
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config',
vbd_ref, key])
db_ref['other_config'][key] = value
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
def PBD_create(self, _1, pbd_rec):
pbd_ref = _create_object('PBD', pbd_rec)
_db_content['PBD'][pbd_ref]['currently_attached'] = False
return pbd_ref
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
raise Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
shared, sm_config):
ref = None
rec = None
for ref, rec in _db_content['SR'].iteritems():
if rec.get('uuid') == sr_uuid:
# make forgotten = 0 and return ref
_db_content['SR'][ref]['forgotten'] = 0
return ref
# SR not found in db, so we create one
params = {'sr_uuid': sr_uuid,
'label': label,
'desc': desc,
'type': type,
'content_type': content_type,
'shared': shared,
'sm_config': sm_config}
sr_ref = _create_object('SR', params)
_db_content['SR'][sr_ref]['uuid'] = sr_uuid
_db_content['SR'][sr_ref]['forgotten'] = 0
vdi_per_lun = False
if type == 'iscsi':
            # iSCSI SRs are introduced with a single VDI per LUN
vdi_per_lun = True
if vdi_per_lun:
# we need to create a vdi because this introduce
# is likely meant for a single vdi
vdi_ref = create_vdi('', sr_ref)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
return sr_ref
def SR_forget(self, _1, sr_ref):
_db_content['SR'][sr_ref]['forgotten'] = 1
def SR_scan(self, _1, sr_ref):
return
def VM_get_xenstore_data(self, _1, vm_ref):
return _db_content['VM'][vm_ref].get('xenstore_data', {})
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
pass
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config',
vdi_ref, key])
db_ref['other_config'][key] = value
def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
db_ref = _db_content['VDI'][vdi_to_copy_ref]
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
other_config = db_ref['other_config'].copy()
return create_vdi(name_label, sr_ref, sharable=sharable,
read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
sr_ref = db_ref['SR']
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
# Always return 12GB available
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
def _plugin_agent_password(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_inject_file(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_agentupdate(self, method, args):
url = args["url"]
md5 = args["md5sum"]
message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
md5=md5)
return as_json(returncode='0', message=message)
def _plugin_noop(self, method, args):
return ''
def _plugin_pickle_noop(self, method, args):
return pickle.dumps(None)
def _plugin_migration_transfer_vhd(self, method, args):
kwargs = pickle.loads(args['params'])['kwargs']
vdi_ref = self.xenapi_request('VDI.get_by_uuid',
(kwargs['vdi_uuid'], ))
assert vdi_ref
return pickle.dumps(None)
_plugin_glance_upload_vhd = _plugin_pickle_noop
_plugin_kernel_copy_vdi = _plugin_noop
_plugin_kernel_create_kernel_ramdisk = _plugin_noop
_plugin_kernel_remove_kernel_ramdisk = _plugin_noop
_plugin_migration_move_vhds_into_sr = _plugin_noop
def _plugin_xenhost_host_data(self, method, args):
return jsonutils.dumps({
'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40},
'host_uuid': 'fb97583b-baa1-452d-850e-819d95285def',
'host_name-label': 'fake-xenhost',
'host_name-description': 'Default install of XenServer',
'host_hostname': 'fake-xenhost',
'host_ip_address': '10.219.10.24',
'enabled': 'true',
'host_capabilities': ['xen-3.0-x86_64',
'xen-3.0-x86_32p',
'hvm-3.0-x86_32',
'hvm-3.0-x86_32p',
'hvm-3.0-x86_64'],
'host_other-config': {
'agent_start_time': '1412774967.',
'iscsi_iqn': 'iqn.2014-10.org.example:39fa9ee3',
'boot_time': '1412774885.',
},
'host_cpu_info': {
'physical_features': '0098e3fd-bfebfbff-00000001-28100800',
'modelname': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
'vendor': 'GenuineIntel',
'features': '0098e3fd-bfebfbff-00000001-28100800',
'family': 6,
'maskable': 'full',
'cpu_count': 4,
'socket_count': '1',
'flags': 'fpu de tsc msr pae mce cx8 apic sep mtrr mca '
'cmov pat clflush acpi mmx fxsr sse sse2 ss ht '
'nx constant_tsc nonstop_tsc aperfmperf pni vmx '
'est ssse3 sse4_1 sse4_2 popcnt hypervisor ida '
'tpr_shadow vnmi flexpriority ept vpid',
'stepping': 5,
'model': 30,
'features_after_reboot': '0098e3fd-bfebfbff-00000001-28100800',
'speed': '2394.086'
},
})
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
_plugin_xenhost_host_reboot = _plugin_poweraction
_plugin_xenhost_host_startup = _plugin_poweraction
_plugin_xenhost_host_shutdown = _plugin_poweraction
def _plugin_xenhost_set_host_enabled(self, method, args):
enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
return jsonutils.dumps({"status": enabled})
def _plugin_xenhost_host_uptime(self, method, args):
return jsonutils.dumps({"uptime": "fake uptime"})
def _plugin_xenhost_get_pci_device_details(self, method, args):
"""Simulate the ouput of three pci devices.
Both of those devices are available for pci passtrough but
only one will match with the pci whitelist used in the
method test_pci_passthrough_devices_*().
Return a single list.
"""
# Driver is not pciback
dev_bad1 = ["Slot:\t0000:86:10.0", "Class:\t0604", "Vendor:\t10b5",
"Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"]
# Driver is pciback but vendor and device are bad
dev_bad2 = ["Slot:\t0000:88:00.0", "Class:\t0300", "Vendor:\t0bad",
"Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
# Driver is pciback and vendor, device are used for matching
dev_good = ["Slot:\t0000:87:00.0", "Class:\t0300", "Vendor:\t10de",
"Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good)
return pickle.dumps(lspci_output)
def _plugin_xenhost_get_pci_type(self, method, args):
return pickle.dumps("type-PCI")
def _plugin_console_get_console_log(self, method, args):
dom_id = args["dom_id"]
if dom_id == 0:
raise Failure('Guest does not have a console')
return base64.b64encode(zlib.compress("dom_id: %s" % dom_id))
def _plugin_nova_plugin_version_get_version(self, method, args):
return pickle.dumps("1.2")
def _plugin_xenhost_query_gc(self, method, args):
return pickle.dumps("False")
def host_call_plugin(self, _1, _2, plugin, method, args):
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %
(plugin, method))
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * units.Gi
def VDI_resize_online(self, *args):
return 'derp'
VDI_resize = VDI_resize_online
def _VM_reboot(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
if db_ref['power_state'] != 'Running':
raise Failure(['VM_BAD_POWER_STATE',
'fake-opaque-ref', db_ref['power_state'].lower(), 'halted'])
db_ref['power_state'] = 'Running'
db_ref['domid'] = random.randrange(1, 1 << 16)
def VM_clean_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Halted'
db_ref['domid'] = -1
VM_clean_shutdown = VM_hard_shutdown
def VM_suspend(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Suspended'
def VM_pause(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Paused'
def pool_eject(self, session, host_ref):
pass
def pool_join(self, session, hostname, username, password):
pass
def pool_set_name_label(self, session, pool_ref, name):
pass
def host_migrate_receive(self, session, destref, nwref, options):
return "fake_migrate_data"
def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
vdi_map, vif_map, options):
pass
def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
vif_map, options):
pass
def VM_remove_from_blocked_operations(self, session, vm_ref, key):
# operation is idempotent, XenServer doesn't care if the key exists
_db_content['VM'][vm_ref]['blocked_operations'].pop(key, None)
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
def _login(self, method, params):
self._session = str(uuid.uuid4())
_session_info = {'uuid': str(uuid.uuid4()),
'this_host': _db_content['host'].keys()[0]}
_db_content['session'][self._session] = _session_info
def _logout(self):
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
"Logging out a session that is invalid or already logged "
"out: %s" % s)
del _db_content['session'][s]
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
elif name.startswith('Async'):
return lambda *params: self._async(name, params)
elif '.' in name:
impl = getattr(self, name.replace('.', '_'))
if impl is not None:
def callit(*params):
LOG.debug('Calling %(name)s %(impl)s',
{'name': name, 'impl': impl})
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
LOG.debug('Calling getter %s', name)
return lambda *params: self._getter(name, params)
elif self._is_gettersetter(name, False):
LOG.debug('Calling setter %s', name)
return lambda *params: self._setter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
elif name == 'XenAPI':
return FakeXenAPI()
else:
return None
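    # Dispatch sketch (illustrative, not part of the original class): a call
    # such as session.xenapi.VM.get_name_label(ref) reaches xenapi_request(),
    # which does getattr(self, 'VM.get_name_label'). The dotted name triggers
    # __getattr__ above: there is no VM_get_name_label implementation, so the
    # name falls through to _is_gettersetter() and _getter() reads the
    # 'name_label' field straight out of the fake _db_content database.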
def _is_gettersetter(self, name, getter):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
return self._is_method(name, 'create')
def _is_destroy(self, name):
return self._is_method(name, 'destroy')
def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1] == meth)
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
if func == 'get_all_records':
self._check_arg_count(params, 1)
return get_all_records(cls)
if func == 'get_all_records_where':
self._check_arg_count(params, 2)
return get_all_records_where(cls, params[1])
if func == 'get_record':
self._check_arg_count(params, 2)
return get_record(cls, params[1])
if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
_db_content[cls], func[len('get_by_'):], params[1],
return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
if (ref in _db_content[cls]):
if (field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
else:
raise Failure(['HANDLE_INVALID', cls, ref])
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if len(params) == 3:
field = func[len('set_'):]
ref = params[1]
val = params[2]
if (ref in _db_content[cls] and
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
return
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
'is missing that field' % name)
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = (is_sr_create and
_create_sr(cls, params) or
is_vlan_create and
_create_vlan(params[1], params[2], params[3]) or
_create_object(cls, params[1]))
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
if after_hook in globals():
globals()[after_hook](ref, params[1])
obj = get_record(cls, ref)
# Add RO fields
if cls == 'VM':
obj['power_state'] = 'Halted'
return ref
def _destroy(self, name, params):
self._check_session(params)
self._check_arg_count(params, 2)
table = name.split('.')[0]
ref = params[1]
if ref not in _db_content[table]:
raise Failure(['HANDLE_INVALID', table, ref])
# Call destroy function (if exists)
destroy_func = globals().get('destroy_%s' % table.lower())
if destroy_func:
destroy_func(ref)
else:
del _db_content[table][ref]
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
func = name[len('Async.'):]
try:
result = self.xenapi_request(func, params[1:])
if result:
result = as_value(result)
task['result'] = result
task['status'] = 'success'
except Failure as exc:
task['error_info'] = exc.details
task['status'] = 'failed'
task['finished'] = timeutils.utcnow()
return task_ref
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug('Raising NotImplemented')
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
actual = len(params)
if actual != expected:
raise Failure(['MESSAGE_PARAMETER_COUNT_MISMATCH',
expected, actual])
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.iteritems():
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
raise Failure(['UUID_INVALID', v, result, recs, k])
return result
class FakeXenAPI(object):
def __init__(self):
self.Failure = Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher(object):
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
| apache-2.0 |
henry-ajere/rad2py | psp2py/modules/statistics.py | 8 | 4368 | #!/usr/bin/env python
# coding: utf8
import math
from integration import f_student_t_distribution, simpson_rule_integrate
def mean(values):
"Calculate the average of the numbers given"
return sum(values) / float(len(values))
def calc_correlation(x_values, y_values):
"Calculate strength of a relationship between two sets of data"
# calculate aux variables
n = len(x_values)
sum_xy = sum([(x_values[i] * y_values[i]) for i in range(n)])
sum_x = sum([(x_values[i]) for i in range(n)])
sum_y = sum([(y_values[i]) for i in range(n)])
sum_x2 = sum([(x_values[i] ** 2) for i in range(n)])
sum_y2 = sum([(y_values[i] ** 2) for i in range(n)])
    # calculate correlation
r = (n * sum_xy - (sum_x * sum_y)) / math.sqrt((n * sum_x2 - sum_x ** 2) * (n * sum_y2 - sum_y ** 2))
return r
def calc_significance(x_values, y_values):
"Calculate the significance (likelihood of two set of data correlation)"
    n = len(x_values)
r = calc_correlation(x_values, y_values)
r2 = r**2
t = abs(r)*math.sqrt(n - 2)/math.sqrt(1 - r**2)
return t, r2, n
def calc_linear_regression(x_values, y_values):
"Calculate the linear regression parameters for a set of n values"
# calculate aux variables
x_avg = mean(x_values)
y_avg = mean(y_values)
n = len(x_values)
sum_xy = sum([(x_values[i] * y_values[i]) for i in range(n)])
sum_x2 = sum([(x_values[i] ** 2) for i in range(n)])
# calculate regression coefficients
b1 = (sum_xy - (n * x_avg * y_avg)) / (sum_x2 - n * (x_avg ** 2))
b0 = y_avg - b1 * x_avg
return (b0, b1)
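# Illustrative check (not part of the original module), with made-up data:
# for x = [1, 2, 3] and y = [2, 4, 6] the least-squares fit is exact, so
# calc_linear_regression(x, y) returns (b0, b1) == (0.0, 2.0).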
def calc_standard_deviation(values):
"Calculate the standard deviation of a list of number values"
x_avg = mean(values)
n = len(values)
sd = math.sqrt(sum([(x_i - x_avg)**2
for x_i in values]) / float(n))
return sd, x_avg
def calc_student_t_probability(x, n):
"Integrate t distribution from -infinity to x with n degrees of freedom"
inf = float("infinity")
p = simpson_rule_integrate(f_student_t_distribution(n), -inf, x)
return p
def calc_double_sided_student_t_probability(t, n):
"Calculate the p-value using a double sided student t distribution"
# integrate a finite area from the origin to t
p_aux = simpson_rule_integrate(f_student_t_distribution(n), 0, t)
# return the area of the two tails of the distribution (symmetrical)
return (0.5 - p_aux) * 2
def calc_double_sided_student_t_value(p, n):
"Calculate the t-value using a double sided student t distribution"
# replaces table lookup, thanks to http://statpages.org/pdfs.html
v = dv = 0.5
t = 0
while dv > 0.000001:
t = 1 / v - 1
dv = dv / 2
if calc_double_sided_student_t_probability(t, n) > p:
v = v - dv
else:
v = v + dv
return t
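# Illustrative check (not part of the original module): for 9 degrees of
# freedom a two-sided p-value of 0.05 corresponds to the familiar critical
# value t ~= 2.262, so calc_double_sided_student_t_value(0.05, 9) converges
# to roughly that number via the bisection loop above.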
def calc_variance(x_values, y_values, b0, b1):
"Calculate the mean square deviation of the linear regeression line"
# take the variance from the regression line instead of the data average
sum_aux = sum([(y - b0 - b1 * x) ** 2 for x, y in zip(x_values, y_values)])
n = float(len(x_values))
return (1 / (n - 2.0)) * sum_aux
def calc_prediction_interval(x_values, y_values, x_k, y_k, alpha):
"""Calculate the linear regression parameters for a set of n values
then calculate the upper and lower prediction interval
"""
# calculate aux variables
x_avg = mean(x_values)
y_avg = mean(y_values)
n = len(x_values)
sum_xy = sum([(x_values[i] * y_values[i]) for i in range(n)])
sum_x2 = sum([(x_values[i] ** 2) for i in range(n)])
# calculate regression coefficients
b1 = (sum_xy - (n * x_avg * y_avg)) / (sum_x2 - n * (x_avg ** 2))
b0 = y_avg - b1 * x_avg
# calculate the t-value for the given alpha p-value
t = calc_double_sided_student_t_value(1 - alpha, n - 2)
# calculate the standard deviation
sigma = math.sqrt(calc_variance(x_values, y_values, b0, b1))
# calculate the range
sum_xi_xavg = sum([(x - x_avg) ** 2 for x in x_values], 0.0)
aux = 1 + (1 / float(n)) + ((x_k - x_avg) ** 2) / sum_xi_xavg
p_range = t * sigma * math.sqrt(aux)
# combine the range with the x_k projection:
return b0, b1, p_range, y_k + p_range, y_k - p_range, t
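# Illustrative usage sketch (not part of the original module); the data are
# made-up numbers that only demonstrate the calling convention.
def _example_prediction_interval():
    x = [130, 650, 99, 150, 128, 302, 95, 945, 368, 961]
    y = [186, 699, 132, 272, 291, 331, 199, 1890, 788, 1601]
    # 70% prediction interval around an estimate y_k projected at x_k
    b0, b1, p_range, upi, lpi, t = calc_prediction_interval(
        x, y, 386.0, 550.0, 0.7)
    return lpi, upi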
| gpl-3.0 |
andrewebdev/tehblog | src/tehblog/templatetags/tehblog_tags.py | 1 | 3510 | # -*- coding: utf-8 -*-
# © Copyright 2009 Andre Engelbrecht. All Rights Reserved.
# This script is licensed under the BSD Open Source Licence
# Please see the text file LICENCE for more information
# If this script is distributed, it must be accompanied by the Licence
import re
from django import template
from django.db.models import Count
from tagging.models import Tag, TaggedItem
from tehblog.models import Entry, Category
register = template.Library()
@register.inclusion_tag('tehblog/tags/category_list.html')
def category_list(count=None):
"""
    Renders a list of categories. Only categories that contain published
blog entries will be returned to the tag and rendered.
The number of categories returned can be restricted with the ``count``
argument
"""
return {
'category_list': Category.objects.all().filter(
entry___statemachine__state='published').distinct()[:count]
}
@register.inclusion_tag('tehblog/tags/tag_list.html')
def tag_list(slice_count=None):
"""
Requires django-tagging.
Renders a list of Tags used for all published blog entries.
``slice_count`` is the number of items that the list in the
template should be sliced to
"""
slice_count = str(slice_count)
try:
tag_list = Tag.objects.usage_for_model(Entry, counts=True,
filters={ '_statemachine__state': 'published' })
except:
pass
return locals()
@register.inclusion_tag('tehblog/tags/date_hierarchy.html')
def date_hierarchy():
"""
This tag will show a dynamic date hierarchy, which can
be used to search for entries in specific years, months or days.
    Note that this tag is dependent on the generic views specified in
urls. If you decide to customize the urls and views in any way, then
this template tag may not function as intended.
usage:
{% load tehblog_tags %}
{% date_hierarchy %}
"""
return {
'hierarchy': Entry.objects.public().order_by('publish_date')\
.values('publish_date')
}
@register.inclusion_tag('tehblog/tags/date_list.html')
def date_list(count=None):
"""
This is a simpler version of the date_hierarchy tag, and will show
recent dates as a list showing the month and year.
Output would typically be: "November 2009"
You can also pass the ``count`` attribute to limit the results. To
return a full list of dates, use ``None``
Usage:
{% load tehblog_tags %}
{% date_list %}
or:
{% date_list 30 %}
"""
date_list = Entry.objects.public().dates('publish_date', 'month',
order="DESC")[:count]
return locals()
@register.inclusion_tag('tehblog/tags/related_entries.html')
def related_entries(entry, count=5):
"""
Renders a list of related blog entries based on the Entry Tags.
This tag will only work if django-tagging is installed.
usage:
{% related_entries entry %}
"""
try:
related_blog_entries = TaggedItem.objects.get_related(
entry, Entry, num=count)
except: return {}
return {
'related_entries': related_blog_entries,
}
## Filters
@register.filter(name='entries_for_month')
def entries_for_month(date_value):
"""
Returns the number of entries that was published on a specific
date.
"""
count = Entry.objects.public().filter(
publish_date__year=date_value.year,
publish_date__month=date_value.month,
).count()
return count | bsd-3-clause |
cancro7/gem5 | src/mem/slicc/ast/MachineAST.py | 27 | 3157 | # Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
# Copyright (c) 2009 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from slicc.ast.DeclAST import DeclAST
from slicc.symbols import StateMachine, Type
class MachineAST(DeclAST):
def __init__(self, slicc, mtype, pairs_ast, config_parameters, decls):
super(MachineAST, self).__init__(slicc, pairs_ast)
self.ident = mtype.value
self.pairs_ast = pairs_ast
self.config_parameters = config_parameters
self.decls = decls
def __repr__(self):
return "[Machine: %r]" % self.ident
def files(self, parent=None):
s = set(('%s_Controller.cc' % self.ident,
'%s_Controller.hh' % self.ident,
'%s_Controller.py' % self.ident,
'%s_Transitions.cc' % self.ident,
'%s_Wakeup.cc' % self.ident))
s |= self.decls.files(self.ident)
return s
def generate(self):
# Make a new frame
self.symtab.pushFrame()
# Create a new machine
machine = StateMachine(self.symtab, self.ident, self.location,
self.pairs, self.config_parameters)
self.symtab.newCurrentMachine(machine)
# Generate code for all the internal decls
self.decls.generate()
# Build the transition table
machine.buildTable()
# Pop the frame
self.symtab.popFrame()
def findMachines(self):
mtype = self.ident
machine_type = self.symtab.find("MachineType", Type)
if not machine_type.checkEnum(mtype):
self.error("Duplicate machine name: %s:%s" % (machine_type, mtype))
| bsd-3-clause |
rhertzog/django | tests/forms_tests/tests/test_media.py | 76 | 23851 | # -*- coding: utf-8 -*-
from django.forms import CharField, Form, Media, MultiWidget, TextInput
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils.encoding import force_text
@override_settings(
STATIC_URL='http://media.example.com/static/',
)
class FormsMediaTestCase(SimpleTestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(
css={'all': ('path/to/css1', '/path/to/css2')},
js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'),
)
self.assertEqual(
str(m),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
class Foo:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(
str(m3),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Media objects can be interrogated by media type
self.assertEqual(
str(w1.media['css']),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />"""
)
self.assertEqual(
str(w1.media['js']),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(
str(w1.media + w2.media + w3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Check that media addition hasn't affected the original objects
self.assertEqual(
str(w1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(
str(w6.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(
str(w7.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>"""
)
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w8 = MyWidget8()
self.assertEqual(
str(w8.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent widget's media,
# even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(
str(w9.media),
"""<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>"""
)
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w11 = MyWidget11()
self.assertEqual(
str(w11.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w12 = MyWidget12()
self.assertEqual(
str(w12.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1', '/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1', '/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(
str(multimedia.media),
"""<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(
str(mymulti.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(
str(f1.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(
str(f1.media + f2.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>"""
)
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(
str(f3.media),
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
)
# Media works in templates
self.assertEqual(
Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})),
"""<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>"""
"""<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />"""
)
def test_html_safe(self):
media = Media(css={'all': ['/path/to/css']}, js=['/path/to/js'])
self.assertTrue(hasattr(Media, '__html__'))
self.assertEqual(force_text(media), media.__html__())
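    # Summary sketch (not part of the original suite): the behaviour the
    # tests above exercise reduces to Media supporting '+' with
    # order-preserving de-duplication, e.g.
    #   m = Media(js=['/a.js']) + Media(js=['/a.js', '/b.js'])
    # renders the /a.js <script> tag once, followed by /b.js.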
| bsd-3-clause |
nzmm/granite | pages/admin.py | 1 | 1080 | from django.contrib import admin
from django.contrib.auth.models import User
from reversion.admin import VersionAdmin
from pages.models import (
Template,
Page,
)
from pages.forms import (
TemplateAdminForm,
PageAdminForm
)
@admin.register(Template)
class TemplateAdmin(VersionAdmin):
list_display = ('handle', 'site', 'template_path')
list_filter = ('site__name',)
readonly_fields = ('template_path', 'fs_full_path')
form = TemplateAdminForm
@admin.register(Page)
class PageAdmin(VersionAdmin):
list_display = ('title', 'site', 'handle', 'role', 'description')
list_filter = ('site__name',)
ordering = ('site', 'role', 'title')
form = PageAdminForm
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
if db_field.name == 'page_author':
kwargs["initial"] = request.user
if not request.user.is_superuser:
kwargs["queryset"] = User.objects.filter(pk=request.user.pk)
return super(PageAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
| gpl-3.0 |
JungWinter/HongikFood | app/message.py | 1 | 3222 | from .keyboard import Keyboard
from json import loads, dumps
class Message:
    # When a Message is created, only the basic skeleton is built here;
    # hand it the values and it assembles and returns the message itself
baseKeyboard = {
"type": "buttons",
"buttons": Keyboard.buttons,
}
baseMessage = {
"message": {
"text": "",
},
"keyboard": baseKeyboard
}
    # Usage: baseMessage["message"].update(baseWeekend)
baseWeekend = {
"message_button": {
"label": "이번주 메뉴 보기",
"url": "http://apps.hongik.ac.kr/food/food.php"
}
}
def __init__(self):
self.returnedMessage = None
def getMessage(self):
return self.returnedMessage
class BaseMessage(Message):
def __init__(self):
super().__init__()
self.returnedMessage = loads(dumps(Message.baseMessage))
def updateMessage(self, message):
self.returnedMessage["message"]["text"] = message
def updateKeyboard(self, argKeyboard):
keyboard = Message.baseKeyboard
keyboard["buttons"] = argKeyboard
self.returnedMessage["keyboard"] = keyboard
def add_photo(self, url, width, height):
photo_message = {
"photo": {
"url": "http://www.hongik.ac.kr/front/images/local/header_logo.png",
"width": 198,
"height": 45,
},
}
photo_message["photo"]["url"] = url
photo_message["photo"]["width"] = width
photo_message["photo"]["height"] = height
self.returnedMessage["message"].update(photo_message)
class EvaluateMessage(BaseMessage):
def __init__(self, message, step):
        '''
        step 1 : evaluate menu -> place
        step 2 : place -> time slot
        step 3 : time slot -> score
        step 4 : score -> done
        '''
super().__init__()
self.updateMessage(message)
if step == 1:
self.updateKeyboard(Keyboard.placeButtons)
elif step == 2:
self.updateKeyboard(Keyboard.timeButtons)
elif step == 3:
self.updateKeyboard(Keyboard.scoreButtons)
elif step == 4:
self.updateKeyboard(Keyboard.homeButtons)
else:
raise
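# Illustrative usage sketch (not part of the original module); the prompt
# strings below are hypothetical English stand-ins for the real prompts.
#
#   msg = EvaluateMessage("Pick a cafeteria", step=1)    # place keyboard
#   msg = EvaluateMessage("Pick a time slot", step=2)    # time keyboard
#   msg = EvaluateMessage("Pick a score", step=3)        # score keyboard
#   msg = EvaluateMessage("Thanks for rating!", step=4)  # home keyboard
#   payload = msg.getMessage()                           # dict ready to serialize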
class SummaryMenuMessage(BaseMessage):
def __init__(self, message, isToday):
super().__init__()
self.updateMessage(message)
if isToday:
self.updateKeyboard(Keyboard.todayButtons)
else:
self.updateKeyboard(Keyboard.tomorrowButtons)
class HomeMessage(Message):
def __init__(self):
self.returnedMessage = Message.baseKeyboard
homeKeyboard = HomeMessage.returnHomeKeyboard()
self.returnedMessage["buttons"] = homeKeyboard
@staticmethod
    def returnHomeKeyboard():
return Keyboard.homeButtons
class FailMessage(BaseMessage):
def __init__(self):
super().__init__()
self.updateMessage("오류가 발생하였습니다.")
self.updateKeyboard(Keyboard.homeButtons)
class SuccessMessage(Message):
def __init__(self):
self.returnedMessage = "SUCCESS"
| mit |
40423247/2017springcd_hw | plugin/sitemap/sitemap.py | 292 | 8774 | # -*- coding: utf-8 -*-
'''
Sitemap
-------
The sitemap plugin generates plain-text or XML sitemaps.
'''
from __future__ import unicode_literals
import re
import collections
import os.path
from datetime import datetime
from logging import warning, info
from codecs import open
from pytz import timezone
from pelican import signals, contents
from pelican.utils import get_date
TXT_HEADER = """{0}/index.html
{0}/archives.html
{0}/tags.html
{0}/categories.html
"""
XML_HEADER = """<?xml version="1.0" encoding="utf-8"?>
<urlset xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
"""
XML_URL = """
<url>
<loc>{0}/{1}</loc>
<lastmod>{2}</lastmod>
<changefreq>{3}</changefreq>
<priority>{4}</priority>
</url>
"""
XML_FOOTER = """
</urlset>
"""
def format_date(date):
if date.tzinfo:
tz = date.strftime('%z')
tz = tz[:-2] + ':' + tz[-2:]
else:
tz = "-00:00"
return date.strftime("%Y-%m-%dT%H:%M:%S") + tz
class SitemapGenerator(object):
def __init__(self, context, settings, path, theme, output_path, *null):
self.output_path = output_path
self.context = context
self.now = datetime.now()
self.siteurl = settings.get('SITEURL')
self.default_timezone = settings.get('TIMEZONE', 'UTC')
self.timezone = getattr(self, 'timezone', self.default_timezone)
self.timezone = timezone(self.timezone)
self.format = 'xml'
self.changefreqs = {
'articles': 'monthly',
'indexes': 'daily',
'pages': 'monthly'
}
self.priorities = {
'articles': 0.5,
'indexes': 0.5,
'pages': 0.5
}
self.sitemapExclude = []
config = settings.get('SITEMAP', {})
if not isinstance(config, dict):
warning("sitemap plugin: the SITEMAP setting must be a dict")
else:
fmt = config.get('format')
pris = config.get('priorities')
chfreqs = config.get('changefreqs')
self.sitemapExclude = config.get('exclude', [])
if fmt not in ('xml', 'txt'):
warning("sitemap plugin: SITEMAP['format'] must be `txt' or `xml'")
warning("sitemap plugin: Setting SITEMAP['format'] on `xml'")
elif fmt == 'txt':
self.format = fmt
return
valid_keys = ('articles', 'indexes', 'pages')
valid_chfreqs = ('always', 'hourly', 'daily', 'weekly', 'monthly',
'yearly', 'never')
if isinstance(pris, dict):
# We use items for Py3k compat. .iteritems() otherwise
for k, v in pris.items():
if k in valid_keys and not isinstance(v, (int, float)):
default = self.priorities[k]
warning("sitemap plugin: priorities must be numbers")
warning("sitemap plugin: setting SITEMAP['priorities']"
"['{0}'] on {1}".format(k, default))
pris[k] = default
self.priorities.update(pris)
elif pris is not None:
warning("sitemap plugin: SITEMAP['priorities'] must be a dict")
warning("sitemap plugin: using the default values")
if isinstance(chfreqs, dict):
# .items() for py3k compat.
for k, v in chfreqs.items():
if k in valid_keys and v not in valid_chfreqs:
default = self.changefreqs[k]
warning("sitemap plugin: invalid changefreq `{0}'".format(v))
warning("sitemap plugin: setting SITEMAP['changefreqs']"
"['{0}'] on '{1}'".format(k, default))
chfreqs[k] = default
self.changefreqs.update(chfreqs)
elif chfreqs is not None:
warning("sitemap plugin: SITEMAP['changefreqs'] must be a dict")
warning("sitemap plugin: using the default values")
def write_url(self, page, fd):
if getattr(page, 'status', 'published') != 'published':
return
# We can disable categories/authors/etc by using False instead of ''
if not page.save_as:
return
page_path = os.path.join(self.output_path, page.save_as)
if not os.path.exists(page_path):
return
lastdate = getattr(page, 'date', self.now)
try:
lastdate = self.get_date_modified(page, lastdate)
except ValueError:
warning("sitemap plugin: " + page.save_as + " has invalid modification date,")
warning("sitemap plugin: using date value as lastmod.")
lastmod = format_date(lastdate)
if isinstance(page, contents.Article):
pri = self.priorities['articles']
chfreq = self.changefreqs['articles']
elif isinstance(page, contents.Page):
pri = self.priorities['pages']
chfreq = self.changefreqs['pages']
else:
pri = self.priorities['indexes']
chfreq = self.changefreqs['indexes']
pageurl = '' if page.url == 'index.html' else page.url
        # Exclude URLs from the sitemap:
if self.format == 'xml':
flag = False
for regstr in self.sitemapExclude:
if re.match(regstr, pageurl):
flag = True
break
if not flag:
fd.write(XML_URL.format(self.siteurl, pageurl, lastmod, chfreq, pri))
else:
fd.write(self.siteurl + '/' + pageurl + '\n')
def get_date_modified(self, page, default):
if hasattr(page, 'modified'):
if isinstance(page.modified, datetime):
return page.modified
return get_date(page.modified)
else:
return default
def set_url_wrappers_modification_date(self, wrappers):
for (wrapper, articles) in wrappers:
lastmod = datetime.min.replace(tzinfo=self.timezone)
for article in articles:
lastmod = max(lastmod, article.date.replace(tzinfo=self.timezone))
try:
modified = self.get_date_modified(article, datetime.min).replace(tzinfo=self.timezone)
lastmod = max(lastmod, modified)
except ValueError:
                    # Suppressed: the user will be notified.
pass
setattr(wrapper, 'modified', str(lastmod))
def generate_output(self, writer):
path = os.path.join(self.output_path, 'sitemap.{0}'.format(self.format))
pages = self.context['pages'] + self.context['articles'] \
+ [ c for (c, a) in self.context['categories']] \
+ [ t for (t, a) in self.context['tags']] \
+ [ a for (a, b) in self.context['authors']]
self.set_url_wrappers_modification_date(self.context['categories'])
self.set_url_wrappers_modification_date(self.context['tags'])
self.set_url_wrappers_modification_date(self.context['authors'])
for article in self.context['articles']:
pages += article.translations
info('writing {0}'.format(path))
with open(path, 'w', encoding='utf-8') as fd:
if self.format == 'xml':
fd.write(XML_HEADER)
else:
fd.write(TXT_HEADER.format(self.siteurl))
FakePage = collections.namedtuple('FakePage',
['status',
'date',
'url',
'save_as'])
for standard_page_url in ['index.html',
'archives.html',
'tags.html',
'categories.html']:
fake = FakePage(status='published',
date=self.now,
url=standard_page_url,
save_as=standard_page_url)
self.write_url(fake, fd)
for page in pages:
self.write_url(page, fd)
if self.format == 'xml':
fd.write(XML_FOOTER)
def get_generators(generators):
return SitemapGenerator
def register():
signals.get_generators.connect(get_generators)
| agpl-3.0 |
jamesbeebop/CouchPotatoServer | libs/enzyme/real.py | 180 | 4547 | # -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import struct
import logging
from exceptions import ParseError
import core
# http://www.pcisys.net/~melanson/codecs/rmff.htm
# http://www.pcisys.net/~melanson/codecs/
# get logging object
log = logging.getLogger(__name__)
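# Chunk layout matching the '>4sIH' unpacks below: a 4-byte object id, a
# 4-byte big-endian size and a 2-byte version; the file-level header id
# must be '.RMF'.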
class RealVideo(core.AVContainer):
def __init__(self, file):
core.AVContainer.__init__(self)
self.mime = 'video/real'
self.type = 'Real Video'
h = file.read(10)
try:
(object_id, object_size, object_version) = struct.unpack('>4sIH', h)
except struct.error:
# EOF.
raise ParseError()
        if object_id != '.RMF':
raise ParseError()
file_version, num_headers = struct.unpack('>II', file.read(8))
log.debug(u'size: %d, ver: %d, headers: %d' % \
(object_size, file_version, num_headers))
for _ in range(0, num_headers):
try:
oi = struct.unpack('>4sIH', file.read(10))
except (struct.error, IOError):
# Header data we expected wasn't there. File may be
# only partially complete.
break
if object_id == 'DATA' and oi[0] != 'INDX':
log.debug(u'INDX chunk expected after DATA but not found -- file corrupt')
break
(object_id, object_size, object_version) = oi
if object_id == 'DATA':
# Seek over the data chunk rather than reading it in.
file.seek(object_size - 10, 1)
else:
self._read_header(object_id, file.read(object_size - 10))
log.debug(u'%r [%d]' % (object_id, object_size - 10))
# Read all the following headers
def _read_header(self, object_id, s):
if object_id == 'PROP':
prop = struct.unpack('>9IHH', s)
log.debug(u'PROP: %r' % prop)
if object_id == 'MDPR':
mdpr = struct.unpack('>H7I', s[:30])
log.debug(u'MDPR: %r' % mdpr)
self.length = mdpr[7] / 1000.0
(stream_name_size,) = struct.unpack('>B', s[30:31])
stream_name = s[31:31 + stream_name_size]
pos = 31 + stream_name_size
(mime_type_size,) = struct.unpack('>B', s[pos:pos + 1])
mime = s[pos + 1:pos + 1 + mime_type_size]
pos += mime_type_size + 1
(type_specific_len,) = struct.unpack('>I', s[pos:pos + 4])
type_specific = s[pos + 4:pos + 4 + type_specific_len]
pos += 4 + type_specific_len
if mime[:5] == 'audio':
ai = core.AudioStream()
ai.id = mdpr[0]
ai.bitrate = mdpr[2]
self.audio.append(ai)
elif mime[:5] == 'video':
vi = core.VideoStream()
vi.id = mdpr[0]
vi.bitrate = mdpr[2]
self.video.append(vi)
else:
log.debug(u'Unknown: %r' % mime)
if object_id == 'CONT':
pos = 0
(title_len,) = struct.unpack('>H', s[pos:pos + 2])
self.title = s[2:title_len + 2]
pos += title_len + 2
(author_len,) = struct.unpack('>H', s[pos:pos + 2])
self.artist = s[pos + 2:pos + author_len + 2]
pos += author_len + 2
(copyright_len,) = struct.unpack('>H', s[pos:pos + 2])
self.copyright = s[pos + 2:pos + copyright_len + 2]
pos += copyright_len + 2
(comment_len,) = struct.unpack('>H', s[pos:pos + 2])
self.comment = s[pos + 2:pos + comment_len + 2]
Parser = RealVideo
| gpl-3.0 |
jamesbeebop/CouchPotatoServer | libs/html5lib/serializer/htmlserializer.py | 235 | 12897 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
import gettext
_ = gettext.gettext
try:
from functools import reduce
except ImportError:
pass
from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils
from xml.sax.saxutils import escape
spaceCharacters = "".join(spaceCharacters)
try:
from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
unicode_encode_errors = "strict"
else:
unicode_encode_errors = "htmlentityreplace"
encode_entity_map = {}
is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((is_ucs4 and len(v) > 1) or
(not is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
            if v not in encode_entity_map or k.islower():
                # prefer the lowercase entity name ("lt" over "LT") and
                # similarly for "amp", "gt", etc.
encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error(unicode_encode_errors, htmlentityreplace_errors)
del register_error
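    # A hedged illustration: with the handler registered above,
    #   u'\xa0'.encode('ascii', 'htmlentityreplace')
    # replaces the unencodable character with its named entity,
    # roughly '&nbsp;'.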
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = False
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer.
Keyword options (default given first unless specified) include:
inject_meta_charset=True|False
          Whether to insert a meta element to define the character set of the
          document.
quote_attr_values=True|False
Whether to quote attribute values that don't require quoting
per HTML5 parsing rules.
quote_char=u'"'|u"'"
Use given quote character for attribute quoting. Default is to
use double quote unless attribute value contains a double quote,
in which case single quotes are used instead.
escape_lt_in_attrs=False|True
Whether to escape < in attribute values.
escape_rcdata=False|True
Whether to escape characters that need to be escaped within normal
elements within rcdata elements such as style.
resolve_entities=True|False
Whether to resolve named character entities that appear in the
source tree. The XML predefined entities < > & " '
are unaffected by this setting.
strip_whitespace=False|True
Whether to remove semantically meaningless whitespace. (This
compresses all whitespace to a single space except within pre.)
minimize_boolean_attributes=True|False
Shortens boolean attributes to give just the attribute value,
for example <input disabled="disabled"> becomes <input disabled>.
use_trailing_solidus=False|True
Includes a close-tag slash at the end of the start tag of void
elements (empty elements whose end tag is forbidden). E.g. <hr/>.
space_before_trailing_solidus=True|False
Places a space immediately before the closing slash in a tag
using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
sanitize=False|True
Strip all unsafe or unknown constructs from output.
See `html5lib user documentation`_
omit_optional_tags=True|False
Omit start/end tags that are optional.
alphabetical_attributes=False|True
Reorder attributes to be in alphabetical order.
.. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
"""
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, unicode_encode_errors)
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from ..filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiency of the latter filter
if self.strip_whitespace:
from ..filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from ..filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from ..filters.optionaltags import Filter
treewalker = Filter(treewalker)
# Alphabetical attributes must be last, as other filters
# could add attributes and alter the order
if self.alphabetical_attributes:
from ..filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError(_("System identifer contains both single and double quote characters"))
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError(_("Unexpected </ in CDATA"))
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
for (attr_namespace, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple())
and k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values or not v:
quote_attr = True
else:
quote_attr = reduce(lambda x, y: x or (y in v),
spaceCharacters + ">\"'=", False)
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError(_("Unexpected child element of a CDATA element"))
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError(_("Comment contains --"))
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
                if key not in entities:
self.serializeError(_("Entity %s not recognized" % name))
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
| gpl-3.0 |
suyashphadtare/vestasi-erp-final | erpnext/support/doctype/maintenance_schedule/maintenance_schedule.py | 31 | 9816 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import add_days, getdate, cint
from frappe import throw, _
from erpnext.utilities.transaction_base import TransactionBase, delete_events
from erpnext.stock.utils import get_valid_serial_nos
class MaintenanceSchedule(TransactionBase):
def get_item_details(self, item_code):
item = frappe.db.sql("""select item_name, description from `tabItem`
where name=%s""", (item_code), as_dict=1)
ret = {
'item_name': item and item[0]['item_name'] or '',
'description' : item and item[0]['description'] or ''
}
return ret
def generate_schedule(self):
self.set('maintenance_schedule_detail', [])
frappe.db.sql("""delete from `tabMaintenance Schedule Detail`
where parent=%s""", (self.name))
count = 1
for d in self.get('item_maintenance_detail'):
self.validate_maintenance_detail()
s_list = []
s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits, d.sales_person)
for i in range(d.no_of_visits):
child = self.append('maintenance_schedule_detail')
child.item_code = d.item_code
child.item_name = d.item_name
child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
if d.serial_no:
child.serial_no = d.serial_no
child.idx = count
count = count + 1
child.sales_person = d.sales_person
self.save()
def on_submit(self):
if not self.get('maintenance_schedule_detail'):
throw(_("Please click on 'Generate Schedule' to get schedule"))
self.check_serial_no_added()
self.validate_schedule()
email_map = {}
for d in self.get('item_maintenance_detail'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.validate_serial_no(serial_nos, d.start_date)
self.update_amc_date(serial_nos, d.end_date)
if d.sales_person not in email_map:
sp = frappe.get_doc("Sales Person", d.sales_person)
email_map[d.sales_person] = sp.get_email_id()
scheduled_date = frappe.db.sql("""select scheduled_date from
`tabMaintenance Schedule Detail` where sales_person=%s and item_code=%s and
parent=%s""", (d.sales_person, d.item_code, self.name), as_dict=1)
for key in scheduled_date:
if email_map[d.sales_person]:
description = "Reference: %s, Item Code: %s and Customer: %s" % \
(self.name, d.item_code, self.customer)
frappe.get_doc({
"doctype": "Event",
"owner": email_map[d.sales_person] or self.owner,
"subject": description,
"description": description,
"starts_on": key["scheduled_date"] + " 10:00:00",
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
}).insert(ignore_permissions=1)
frappe.db.set(self, 'status', 'Submitted')
def create_schedule_list(self, start_date, end_date, no_of_visit, sales_person):
schedule_list = []
start_date_copy = start_date
date_diff = (getdate(end_date) - getdate(start_date)).days
add_by = date_diff / no_of_visit
for visit in range(cint(no_of_visit)):
if (getdate(start_date_copy) < getdate(end_date)):
start_date_copy = add_days(start_date_copy, add_by)
if len(schedule_list) < no_of_visit:
schedule_date = self.validate_schedule_date_for_holiday_list(getdate(start_date_copy),
sales_person)
if schedule_date > getdate(end_date):
schedule_date = getdate(end_date)
schedule_list.append(schedule_date)
return schedule_list
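	# A hedged walk-through: with start 2014-01-01, end 2014-05-01 and
	# no_of_visit 4, date_diff is 120 and add_by 30, so visits land roughly
	# every 30 days, each nudged off holidays by
	# validate_schedule_date_for_holiday_list.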
def validate_schedule_date_for_holiday_list(self, schedule_date, sales_person):
from erpnext.accounts.utils import get_fiscal_year
validated = False
fy_details = ""
try:
fy_details = get_fiscal_year(date=schedule_date, verbose=0)
except Exception:
pass
if fy_details and fy_details[0]:
# check holiday list in employee master
holiday_list = frappe.db.sql_list("""select h.holiday_date from `tabEmployee` emp,
`tabSales Person` sp, `tabHoliday` h, `tabHoliday List` hl
where sp.name=%s and emp.name=sp.employee
and hl.name=emp.holiday_list and
h.parent=hl.name and
hl.fiscal_year=%s""", (sales_person, fy_details[0]))
if not holiday_list:
# check global holiday list
holiday_list = frappe.db.sql("""select h.holiday_date from
`tabHoliday` h, `tabHoliday List` hl
where h.parent=hl.name and ifnull(hl.is_default, 0) = 1
and hl.fiscal_year=%s""", fy_details[0])
if not validated and holiday_list:
if schedule_date in holiday_list:
schedule_date = add_days(schedule_date, -1)
else:
validated = True
return schedule_date
def validate_dates_with_periodicity(self):
for d in self.get("item_maintenance_detail"):
if d.start_date and d.end_date and d.periodicity and d.periodicity!="Random":
date_diff = (getdate(d.end_date) - getdate(d.start_date)).days + 1
days_in_period = {
"Weekly": 7,
"Monthly": 30,
"Quarterly": 90,
"Half Yearly": 180,
"Yearly": 365
}
if date_diff < days_in_period[d.periodicity]:
throw(_("Row {0}: To set {1} periodicity, difference between from and to date \
must be greater than or equal to {2}")
.format(d.idx, d.periodicity, days_in_period[d.periodicity]))
def validate_maintenance_detail(self):
if not self.get('item_maintenance_detail'):
throw(_("Please enter Maintaince Details first"))
for d in self.get('item_maintenance_detail'):
if not d.item_code:
throw(_("Please select item code"))
elif not d.start_date or not d.end_date:
throw(_("Please select Start Date and End Date for Item {0}".format(d.item_code)))
elif not d.no_of_visits:
throw(_("Please mention no of visits required"))
elif not d.sales_person:
throw(_("Please select Incharge Person's name"))
if getdate(d.start_date) >= getdate(d.end_date):
throw(_("Start date should be less than end date for Item {0}").format(d.item_code))
def validate_sales_order(self):
for d in self.get('item_maintenance_detail'):
if d.prevdoc_docname:
chk = frappe.db.sql("""select ms.name from `tabMaintenance Schedule` ms,
`tabMaintenance Schedule Item` msi where msi.parent=ms.name and
msi.prevdoc_docname=%s and ms.docstatus=1""", d.prevdoc_docname)
if chk:
throw(_("Maintenance Schedule {0} exists against {0}").format(chk[0][0], d.prevdoc_docname))
def validate(self):
self.validate_maintenance_detail()
self.validate_dates_with_periodicity()
self.validate_sales_order()
def on_update(self):
frappe.db.set(self, 'status', 'Draft')
def update_amc_date(self, serial_nos, amc_expiry_date=None):
for serial_no in serial_nos:
serial_no_doc = frappe.get_doc("Serial No", serial_no)
serial_no_doc.amc_expiry_date = amc_expiry_date
serial_no_doc.save()
def validate_serial_no(self, serial_nos, amc_start_date):
for serial_no in serial_nos:
sr_details = frappe.db.get_value("Serial No", serial_no,
["warranty_expiry_date", "amc_expiry_date", "status", "delivery_date"], as_dict=1)
if not sr_details:
frappe.throw(_("Serial No {0} not found").format(serial_no))
if sr_details.warranty_expiry_date and sr_details.warranty_expiry_date>=amc_start_date:
throw(_("Serial No {0} is under warranty upto {1}").format(serial_no, sr_details.warranty_expiry_date))
if sr_details.amc_expiry_date and sr_details.amc_expiry_date >= amc_start_date:
throw(_("Serial No {0} is under maintenance contract upto {1}").format(serial_no, sr_details.amc_start_date))
if sr_details.status=="Delivered" and sr_details.delivery_date and \
sr_details.delivery_date >= amc_start_date:
throw(_("Maintenance start date can not be before delivery date for Serial No {0}").format(serial_no))
def validate_schedule(self):
		item_lst1 = []
		item_lst2 = []
for d in self.get('item_maintenance_detail'):
if d.item_code not in item_lst1:
item_lst1.append(d.item_code)
for m in self.get('maintenance_schedule_detail'):
if m.item_code not in item_lst2:
item_lst2.append(m.item_code)
if len(item_lst1) != len(item_lst2):
throw(_("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'"))
else:
for x in item_lst1:
if x not in item_lst2:
throw(_("Please click on 'Generate Schedule'"))
def check_serial_no_added(self):
serial_present =[]
for d in self.get('item_maintenance_detail'):
if d.serial_no:
serial_present.append(d.item_code)
for m in self.get('maintenance_schedule_detail'):
if serial_present:
if m.item_code in serial_present and not m.serial_no:
throw(_("Please click on 'Generate Schedule' to fetch Serial No added for Item {0}").format(m.item_code))
def on_cancel(self):
for d in self.get('item_maintenance_detail'):
if d.serial_no:
serial_nos = get_valid_serial_nos(d.serial_no)
self.update_amc_date(serial_nos)
frappe.db.set(self, 'status', 'Cancelled')
delete_events(self.doctype, self.name)
def on_trash(self):
delete_events(self.doctype, self.name)
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc
def update_status(source, target, parent):
target.maintenance_type = "Scheduled"
doclist = get_mapped_doc("Maintenance Schedule", source_name, {
"Maintenance Schedule": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "maintenance_schedule"
},
"validation": {
"docstatus": ["=", 1]
},
"postprocess": update_status
},
"Maintenance Schedule Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"sales_person": "service_person"
}
}
}, target_doc)
return doclist
| agpl-3.0 |
hospace/ToughRADIUS | toughradius/console/admin/node.py | 5 | 3863 | #!/usr/bin/env python
#coding=utf-8
import sys, os
from bottle import Bottle
from bottle import request
from bottle import response
from bottle import redirect
from bottle import MakoTemplate
from bottle import static_file
from bottle import abort
from beaker.cache import cache_managers
from toughradius.console.libs.paginator import Paginator
from toughradius.console.libs import utils
from toughradius.console.websock import websock
from toughradius.console import models
from toughradius.console.base import *
from toughradius.console.admin import node_forms
from hashlib import md5
from twisted.python import log
import bottle
import datetime
import json
import functools
__prefix__ = "/node"
app = Bottle()
app.config['__prefix__'] = __prefix__
###############################################################################
# node manage
###############################################################################
@app.get('/', apply=auth_opr)
def node(db, render):
return render("sys_node_list", page_data=get_page_data(db.query(models.SlcNode)))
permit.add_route("/node", u"区域信息管理", u"系统管理", is_menu=True, order=1)
@app.get('/add', apply=auth_opr)
def node_add(db, render):
return render("base_form", form=node_forms.node_add_form())
@app.post('/add', apply=auth_opr)
def node_add_post(db, render):
form = node_forms.node_add_form()
if not form.validates(source=request.forms):
return render("base_form", form=form)
node = models.SlcNode()
node.node_name = form.d.node_name
node.node_desc = form.d.node_desc
db.add(node)
ops_log = models.SlcRadOperateLog()
ops_log.operator_name = get_cookie("username")
ops_log.operate_ip = get_cookie("login_ip")
ops_log.operate_time = utils.get_currtime()
ops_log.operate_desc = u'操作员(%s)新增区域信息:%s' % (get_cookie("username"), node.node_name)
db.add(ops_log)
db.commit()
redirect("/node")
permit.add_route("/node/add", u"新增区域", u"系统管理", order=1.01, is_open=False)
@app.get('/update', apply=auth_opr)
def node_update(db, render):
node_id = request.params.get("node_id")
form = node_forms.node_update_form()
form.fill(db.query(models.SlcNode).get(node_id))
return render("base_form", form=form)
@app.post('/update', apply=auth_opr)
def node_add_update(db, render):
form = node_forms.node_update_form()
if not form.validates(source=request.forms):
return render("base_form", form=form)
node = db.query(models.SlcNode).get(form.d.id)
node.node_name = form.d.node_name
node.node_desc = form.d.node_desc
ops_log = models.SlcRadOperateLog()
ops_log.operator_name = get_cookie("username")
ops_log.operate_ip = get_cookie("login_ip")
ops_log.operate_time = utils.get_currtime()
ops_log.operate_desc = u'操作员(%s)修改区域信息:%s' % (get_cookie("username"), node.node_name)
db.add(ops_log)
db.commit()
redirect("/node")
permit.add_route("/node/update", u"修改区域", u"系统管理", order=1.02, is_open=False)
@app.get('/delete', apply=auth_opr)
def node_delete(db, render):
node_id = request.params.get("node_id")
if db.query(models.SlcMember.member_id).filter_by(node_id=node_id).count() > 0:
return render("error", msg=u"该节点下有用户,不允许删除")
db.query(models.SlcNode).filter_by(id=node_id).delete()
ops_log = models.SlcRadOperateLog()
ops_log.operator_name = get_cookie("username")
ops_log.operate_ip = get_cookie("login_ip")
ops_log.operate_time = utils.get_currtime()
ops_log.operate_desc = u'操作员(%s)删除区域信息:%s' % (get_cookie("username"), node_id)
db.add(ops_log)
db.commit()
redirect("/node")
permit.add_route("/node/delete", u"删除区域", u"系统管理", order=1.03, is_open=False) | agpl-3.0 |
hendrikx-itc/python-minerva | tests/storage/test_outputdescriptor.py | 1 | 1255 | import unittest
from minerva.storage.valuedescriptor import ValueDescriptor
from minerva.storage.outputdescriptor import OutputDescriptor
from minerva.storage import datatype
class TestOutputDescriptor(unittest.TestCase):
def test_constructor(self):
value_descriptor = ValueDescriptor(
'x',
datatype.registry['smallint']
)
output_descriptor = OutputDescriptor(
value_descriptor
)
assert output_descriptor is not None
def test_serialize_smallint(self):
output_descriptor = OutputDescriptor(
ValueDescriptor('x', datatype.registry['smallint'])
)
assert output_descriptor.serialize(43) == '43'
def test_load_from_config(self):
config = {
'name': 'x',
'data_type': 'smallint',
'serializer_config': {
}
}
output_descriptor = OutputDescriptor.load(config)
self.assertIs(
output_descriptor.value_descriptor.data_type,
datatype.registry['smallint']
)
self.assertEqual(output_descriptor.value_descriptor.name, 'x')
self.assertEqual(config, output_descriptor.to_dict())
| gpl-3.0 |
sirchia/CouchPotatoServer | libs/jinja2/_markupsafe/_constants.py | 1535 | 4795 | # -*- coding: utf-8 -*-
"""
markupsafe._constants
~~~~~~~~~~~~~~~~~~~~~
Highlevel implementation of the Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
HTML_ENTITIES = {
'AElig': 198,
'Aacute': 193,
'Acirc': 194,
'Agrave': 192,
'Alpha': 913,
'Aring': 197,
'Atilde': 195,
'Auml': 196,
'Beta': 914,
'Ccedil': 199,
'Chi': 935,
'Dagger': 8225,
'Delta': 916,
'ETH': 208,
'Eacute': 201,
'Ecirc': 202,
'Egrave': 200,
'Epsilon': 917,
'Eta': 919,
'Euml': 203,
'Gamma': 915,
'Iacute': 205,
'Icirc': 206,
'Igrave': 204,
'Iota': 921,
'Iuml': 207,
'Kappa': 922,
'Lambda': 923,
'Mu': 924,
'Ntilde': 209,
'Nu': 925,
'OElig': 338,
'Oacute': 211,
'Ocirc': 212,
'Ograve': 210,
'Omega': 937,
'Omicron': 927,
'Oslash': 216,
'Otilde': 213,
'Ouml': 214,
'Phi': 934,
'Pi': 928,
'Prime': 8243,
'Psi': 936,
'Rho': 929,
'Scaron': 352,
'Sigma': 931,
'THORN': 222,
'Tau': 932,
'Theta': 920,
'Uacute': 218,
'Ucirc': 219,
'Ugrave': 217,
'Upsilon': 933,
'Uuml': 220,
'Xi': 926,
'Yacute': 221,
'Yuml': 376,
'Zeta': 918,
'aacute': 225,
'acirc': 226,
'acute': 180,
'aelig': 230,
'agrave': 224,
'alefsym': 8501,
'alpha': 945,
'amp': 38,
'and': 8743,
'ang': 8736,
'apos': 39,
'aring': 229,
'asymp': 8776,
'atilde': 227,
'auml': 228,
'bdquo': 8222,
'beta': 946,
'brvbar': 166,
'bull': 8226,
'cap': 8745,
'ccedil': 231,
'cedil': 184,
'cent': 162,
'chi': 967,
'circ': 710,
'clubs': 9827,
'cong': 8773,
'copy': 169,
'crarr': 8629,
'cup': 8746,
'curren': 164,
'dArr': 8659,
'dagger': 8224,
'darr': 8595,
'deg': 176,
'delta': 948,
'diams': 9830,
'divide': 247,
'eacute': 233,
'ecirc': 234,
'egrave': 232,
'empty': 8709,
'emsp': 8195,
'ensp': 8194,
'epsilon': 949,
'equiv': 8801,
'eta': 951,
'eth': 240,
'euml': 235,
'euro': 8364,
'exist': 8707,
'fnof': 402,
'forall': 8704,
'frac12': 189,
'frac14': 188,
'frac34': 190,
'frasl': 8260,
'gamma': 947,
'ge': 8805,
'gt': 62,
'hArr': 8660,
'harr': 8596,
'hearts': 9829,
'hellip': 8230,
'iacute': 237,
'icirc': 238,
'iexcl': 161,
'igrave': 236,
'image': 8465,
'infin': 8734,
'int': 8747,
'iota': 953,
'iquest': 191,
'isin': 8712,
'iuml': 239,
'kappa': 954,
'lArr': 8656,
'lambda': 955,
'lang': 9001,
'laquo': 171,
'larr': 8592,
'lceil': 8968,
'ldquo': 8220,
'le': 8804,
'lfloor': 8970,
'lowast': 8727,
'loz': 9674,
'lrm': 8206,
'lsaquo': 8249,
'lsquo': 8216,
'lt': 60,
'macr': 175,
'mdash': 8212,
'micro': 181,
'middot': 183,
'minus': 8722,
'mu': 956,
'nabla': 8711,
'nbsp': 160,
'ndash': 8211,
'ne': 8800,
'ni': 8715,
'not': 172,
'notin': 8713,
'nsub': 8836,
'ntilde': 241,
'nu': 957,
'oacute': 243,
'ocirc': 244,
'oelig': 339,
'ograve': 242,
'oline': 8254,
'omega': 969,
'omicron': 959,
'oplus': 8853,
'or': 8744,
'ordf': 170,
'ordm': 186,
'oslash': 248,
'otilde': 245,
'otimes': 8855,
'ouml': 246,
'para': 182,
'part': 8706,
'permil': 8240,
'perp': 8869,
'phi': 966,
'pi': 960,
'piv': 982,
'plusmn': 177,
'pound': 163,
'prime': 8242,
'prod': 8719,
'prop': 8733,
'psi': 968,
'quot': 34,
'rArr': 8658,
'radic': 8730,
'rang': 9002,
'raquo': 187,
'rarr': 8594,
'rceil': 8969,
'rdquo': 8221,
'real': 8476,
'reg': 174,
'rfloor': 8971,
'rho': 961,
'rlm': 8207,
'rsaquo': 8250,
'rsquo': 8217,
'sbquo': 8218,
'scaron': 353,
'sdot': 8901,
'sect': 167,
'shy': 173,
'sigma': 963,
'sigmaf': 962,
'sim': 8764,
'spades': 9824,
'sub': 8834,
'sube': 8838,
'sum': 8721,
'sup': 8835,
'sup1': 185,
'sup2': 178,
'sup3': 179,
'supe': 8839,
'szlig': 223,
'tau': 964,
'there4': 8756,
'theta': 952,
'thetasym': 977,
'thinsp': 8201,
'thorn': 254,
'tilde': 732,
'times': 215,
'trade': 8482,
'uArr': 8657,
'uacute': 250,
'uarr': 8593,
'ucirc': 251,
'ugrave': 249,
'uml': 168,
'upsih': 978,
'upsilon': 965,
'uuml': 252,
'weierp': 8472,
'xi': 958,
'yacute': 253,
'yen': 165,
'yuml': 255,
'zeta': 950,
'zwj': 8205,
'zwnj': 8204
}
| gpl-3.0 |
slevenhagen/odoomrp-wip-npg | mrp_production_capacity/models/mrp.py | 2 | 4275 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, fields, api, _
import sys
class MrpWorkcenter(models.Model):
_inherit = 'mrp.workcenter'
capacity_per_cycle = fields.Float(
string='Capacity per Cycle Max.', help='Capacity per cycle maximum.')
capacity_per_cycle_min = fields.Float(
string='Capacity per Cycle Min.', help='Capacity per cycle minimum.')
class MrpRoutingWorkcenter(models.Model):
_inherit = 'mrp.routing.workcenter'
limited_production_capacity = fields.Boolean()
class MrpProduction(models.Model):
_inherit = 'mrp.production'
@api.multi
def product_qty_change_production_capacity(self, product_qty=0,
routing_id=False):
result = {}
routing_obj = self.env['mrp.routing']
if product_qty and routing_id:
routing = routing_obj.browse(routing_id)
for line in routing.workcenter_lines:
if line.limited_production_capacity:
capacity_min = (
line.workcenter_id.capacity_per_cycle_min or
sys.float_info.min)
capacity_max = (line.workcenter_id.capacity_per_cycle or
sys.float_info.max)
if capacity_min and capacity_max:
if (product_qty < capacity_min or
product_qty > capacity_max):
warning = {
'title': _('Warning!'),
                                'message': _('Product QTY < Capacity per cycle'
                                             ' minimum, or > Capacity per'
                                             ' cycle maximum')
}
result['warning'] = warning
return result
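    # A contextual note: in Odoo's v8-era onchange convention, returning
    # {'warning': {...}} from a handler like the one above pops a
    # non-blocking dialog in the form view.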
@api.one
@api.onchange('routing_id')
def onchange_routing(self):
if self.routing_id:
for line in self.routing_id.workcenter_lines:
if (line.limited_production_capacity and
line.workcenter_id.capacity_per_cycle):
self.product_qty = line.workcenter_id.capacity_per_cycle
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
@api.multi
def workcenter_change_production_capacity(self, product_qty=0,
workcenter_id=False):
result = {}
result['value'] = {}
workcenter_obj = self.env['mrp.workcenter']
if product_qty and workcenter_id:
workcenter = workcenter_obj.browse(workcenter_id)
capacity_min = (workcenter.capacity_per_cycle_min or
sys.float_info.min)
capacity_max = (workcenter.capacity_per_cycle or
sys.float_info.max)
if capacity_min and capacity_max:
if (product_qty < capacity_min or
product_qty > capacity_max):
warning = {
'title': _('Warning!'),
                        'message': _('Product QTY < Capacity per cycle'
                                     ' minimum, or > Capacity per'
                                     ' cycle maximum')
}
result['warning'] = warning
return result
| agpl-3.0 |